author    Matei Zaharia <matei@eecs.berkeley.edu>    2010-11-13 22:07:08 -0800
committer Matei Zaharia <matei@eecs.berkeley.edu>    2010-11-13 22:07:08 -0800
commit    89fcd96702d6aa963192f0221922d2702820048f (patch)
tree      99dcdc918425ec68170d02bf2dc8eb6ab2bd400b /lib
parent    b7574201d56e0466b874a24b7ee6b09e29e0c2d8 (diff)
Initial work to get Spark compiling with SBT 0.7.5 RC0
Diffstat (limited to 'lib')
-rw-r--r--  lib/apache-log4j-1.2.16/log4j-1.2.16.jar  bin 0 -> 481534 bytes
-rw-r--r--  lib/asm-3.2/.DS_Store  bin 0 -> 6148 bytes
-rw-r--r--  lib/asm-3.2/lib/all/README.txt  3
-rw-r--r--  lib/asm-3.2/lib/all/asm-all-3.2.jar  bin 0 -> 207939 bytes
-rw-r--r--  lib/asm-3.2/lib/all/asm-all-3.2.pom  15
-rw-r--r--  lib/asm-3.2/lib/all/asm-debug-all-3.2.jar  bin 0 -> 305420 bytes
-rw-r--r--  lib/asm-3.2/lib/all/asm-debug-all-3.2.pom  15
-rw-r--r--  lib/asm-3.2/lib/asm-3.2.jar  bin 0 -> 43401 bytes
-rw-r--r--  lib/asm-3.2/lib/asm-3.2.pom  14
-rw-r--r--  lib/asm-3.2/lib/asm-analysis-3.2.jar  bin 0 -> 17988 bytes
-rw-r--r--  lib/asm-3.2/lib/asm-analysis-3.2.pom  21
-rw-r--r--  lib/asm-3.2/lib/asm-commons-3.2.jar  bin 0 -> 37619 bytes
-rw-r--r--  lib/asm-3.2/lib/asm-commons-3.2.pom  21
-rw-r--r--  lib/asm-3.2/lib/asm-parent-3.2.pom  136
-rw-r--r--  lib/asm-3.2/lib/asm-tree-3.2.jar  bin 0 -> 21881 bytes
-rw-r--r--  lib/asm-3.2/lib/asm-tree-3.2.pom  21
-rw-r--r--  lib/asm-3.2/lib/asm-util-3.2.jar  bin 0 -> 36552 bytes
-rw-r--r--  lib/asm-3.2/lib/asm-util-3.2.pom  21
-rw-r--r--  lib/asm-3.2/lib/asm-xml-3.2.jar  bin 0 -> 51856 bytes
-rw-r--r--  lib/asm-3.2/lib/asm-xml-3.2.pom  21
-rw-r--r--  lib/colt.jar  bin 0 -> 581945 bytes
-rw-r--r--  lib/guava-r07/COPYING  202
-rw-r--r--  lib/guava-r07/README  28
-rw-r--r--  lib/guava-r07/guava-r07.jar  bin 0 -> 1075964 bytes
-rw-r--r--  lib/hadoop-0.20.0/.DS_Store  bin 0 -> 6148 bytes
-rw-r--r--  lib/hadoop-0.20.0/CHANGES.txt  8288
-rw-r--r--  lib/hadoop-0.20.0/LICENSE.txt  244
-rw-r--r--  lib/hadoop-0.20.0/NOTICE.txt  2
-rw-r--r--  lib/hadoop-0.20.0/README.txt  31
-rwxr-xr-x  lib/hadoop-0.20.0/bin/hadoop  289
-rwxr-xr-x  lib/hadoop-0.20.0/bin/hadoop-config.sh  68
-rwxr-xr-x  lib/hadoop-0.20.0/bin/hadoop-daemon.sh  143
-rwxr-xr-x  lib/hadoop-0.20.0/bin/hadoop-daemons.sh  34
-rwxr-xr-x  lib/hadoop-0.20.0/bin/rcc  99
-rwxr-xr-x  lib/hadoop-0.20.0/bin/slaves.sh  68
-rwxr-xr-x  lib/hadoop-0.20.0/bin/start-all.sh  30
-rwxr-xr-x  lib/hadoop-0.20.0/bin/start-balancer.sh  25
-rwxr-xr-x  lib/hadoop-0.20.0/bin/start-dfs.sh  52
-rwxr-xr-x  lib/hadoop-0.20.0/bin/start-mapred.sh  29
-rwxr-xr-x  lib/hadoop-0.20.0/bin/stop-all.sh  27
-rwxr-xr-x  lib/hadoop-0.20.0/bin/stop-balancer.sh  26
-rwxr-xr-x  lib/hadoop-0.20.0/bin/stop-dfs.sh  29
-rwxr-xr-x  lib/hadoop-0.20.0/bin/stop-mapred.sh  28
-rw-r--r--  lib/hadoop-0.20.0/build.xml  1796
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/Pipes.hh  258
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/SerialUtils.hh  169
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/StringUtils.hh  81
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/TemplateFactory.hh  96
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-amd64-64/lib/libhadooppipes.a  bin 0 -> 318270 bytes
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-amd64-64/lib/libhadooputils.a  bin 0 -> 88620 bytes
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/Pipes.hh  258
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/SerialUtils.hh  169
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/StringUtils.hh  81
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/TemplateFactory.hh  96
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhadooppipes.a  bin 0 -> 226390 bytes
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhadooputils.a  bin 0 -> 62576 bytes
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.la  41
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so  bin 0 -> 41611 bytes
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so.0  bin 0 -> 41611 bytes
-rw-r--r--  lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so.0.0.0  bin 0 -> 41611 bytes
-rw-r--r--  lib/hadoop-0.20.0/conf/capacity-scheduler.xml  156
-rw-r--r--  lib/hadoop-0.20.0/conf/configuration.xsl  24
-rw-r--r--  lib/hadoop-0.20.0/conf/core-site.xml  8
-rw-r--r--  lib/hadoop-0.20.0/conf/hadoop-env.sh  54
-rw-r--r--  lib/hadoop-0.20.0/conf/hadoop-metrics.properties  40
-rw-r--r--  lib/hadoop-0.20.0/conf/hadoop-policy.xml  97
-rw-r--r--  lib/hadoop-0.20.0/conf/hdfs-site.xml  8
-rw-r--r--  lib/hadoop-0.20.0/conf/log4j.properties  94
-rw-r--r--  lib/hadoop-0.20.0/conf/mapred-site.xml  8
-rw-r--r--  lib/hadoop-0.20.0/conf/masters  1
-rw-r--r--  lib/hadoop-0.20.0/conf/slaves  1
-rw-r--r--  lib/hadoop-0.20.0/conf/ssl-client.xml.example  57
-rw-r--r--  lib/hadoop-0.20.0/conf/ssl-server.xml.example  55
-rw-r--r--  lib/hadoop-0.20.0/contrib/capacity-scheduler/hadoop-0.20.0-capacity-scheduler.jar  bin 0 -> 51224 bytes
-rw-r--r--  lib/hadoop-0.20.0/contrib/datajoin/hadoop-0.20.0-datajoin.jar  bin 0 -> 12667 bytes
-rw-r--r--  lib/hadoop-0.20.0/contrib/eclipse-plugin/hadoop-0.20.0-eclipse-plugin.jar  bin 0 -> 3009728 bytes
-rw-r--r--  lib/hadoop-0.20.0/contrib/fairscheduler/hadoop-0.20.0-fairscheduler.jar  bin 0 -> 37087 bytes
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/README  30
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy  170
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-config.sh  67
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh  141
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh  34
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh  68
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/start-hdfsproxy.sh  37
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/hdfsproxy/bin/stop-hdfsproxy.sh  28
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/build.xml  183
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/configuration.xsl  24
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-default.xml  59
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh  44
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template  44
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-hosts  1
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/log4j.properties  61
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-certs.xml  26
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-permissions.xml  28
-rw-r--r--  lib/hadoop-0.20.0/contrib/hdfsproxy/hdfsproxy-1.0.jar  bin 0 -> 21572 bytes
-rw-r--r--  lib/hadoop-0.20.0/contrib/index/hadoop-0.20.0-index.jar  bin 0 -> 63178 bytes
-rw-r--r--  lib/hadoop-0.20.0/contrib/streaming/hadoop-0.20.0-streaming.jar  bin 0 -> 68304 bytes
-rw-r--r--  lib/hadoop-0.20.0/contrib/thriftfs/hadoop-0.20.0-thriftfs.jar  bin 0 -> 10434 bytes
-rwxr-xr-x  lib/hadoop-0.20.0/contrib/vaidya/bin/vaidya.sh  47
-rw-r--r--  lib/hadoop-0.20.0/contrib/vaidya/conf/postex_diagnosis_tests.xml  104
-rw-r--r--  lib/hadoop-0.20.0/contrib/vaidya/hadoop-0.20.0-vaidya.jar  bin 0 -> 42201 bytes
-rw-r--r--  lib/hadoop-0.20.0/hadoop-0.20.0-ant.jar  bin 0 -> 6839 bytes
-rw-r--r--  lib/hadoop-0.20.0/hadoop-0.20.0-core.jar  bin 0 -> 2585066 bytes
-rw-r--r--  lib/hadoop-0.20.0/hadoop-0.20.0-examples.jar  bin 0 -> 142465 bytes
-rw-r--r--  lib/hadoop-0.20.0/hadoop-0.20.0-test.jar  bin 0 -> 1440518 bytes
-rw-r--r--  lib/hadoop-0.20.0/hadoop-0.20.0-tools.jar  bin 0 -> 69804 bytes
-rw-r--r--  lib/hadoop-0.20.0/ivy.xml  261
-rw-r--r--  lib/hadoop-0.20.0/ivy/hadoop-core.pom  257
-rw-r--r--  lib/hadoop-0.20.0/ivy/ivy-2.0.0-rc2.jar  bin 0 -> 893199 bytes
-rw-r--r--  lib/hadoop-0.20.0/ivy/ivysettings.xml  81
-rw-r--r--  lib/hadoop-0.20.0/ivy/libraries.properties  71
-rw-r--r--  lib/hadoop-0.20.0/lib/.DS_Store  bin 0 -> 6148 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/commons-cli-2.0-SNAPSHOT.jar  bin 0 -> 258337 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/commons-codec-1.3.jar  bin 0 -> 46725 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/commons-el-1.0.jar  bin 0 -> 112341 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/commons-httpclient-3.0.1.jar  bin 0 -> 279781 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/commons-logging-1.0.4.jar  bin 0 -> 38015 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/commons-logging-api-1.0.4.jar  bin 0 -> 26202 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/commons-net-1.4.1.jar  bin 0 -> 180792 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/core-3.1.1.jar  bin 0 -> 3566844 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/hsqldb-1.8.0.10.LICENSE.txt  66
-rw-r--r--  lib/hadoop-0.20.0/lib/hsqldb-1.8.0.10.jar  bin 0 -> 706710 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/jasper-compiler-5.5.12.jar  bin 0 -> 405086 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/jasper-runtime-5.5.12.jar  bin 0 -> 76698 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/jdiff/hadoop_0.17.0.xml  43272
-rw-r--r--  lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.1.xml  44778
-rw-r--r--  lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.2.xml  38788
-rw-r--r--  lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.3.xml  38826
-rw-r--r--  lib/hadoop-0.20.0/lib/jdiff/hadoop_0.19.0.xml  43972
-rw-r--r--  lib/hadoop-0.20.0/lib/jdiff/hadoop_0.19.1.xml  44195
-rw-r--r--  lib/hadoop-0.20.0/lib/jdiff/hadoop_0.20.0.xml  52140
-rw-r--r--  lib/hadoop-0.20.0/lib/jets3t-0.6.1.jar  bin 0 -> 321806 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/jetty-6.1.14.jar  bin 0 -> 516429 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/jetty-util-6.1.14.jar  bin 0 -> 163121 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/jsp-2.1/jsp-2.1.jar  bin 0 -> 1024681 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/jsp-2.1/jsp-api-2.1.jar  bin 0 -> 134910 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/junit-3.8.1.jar  bin 0 -> 121070 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/kfs-0.2.2.jar  bin 0 -> 11428 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/kfs-0.2.LICENSE.txt  202
-rw-r--r--  lib/hadoop-0.20.0/lib/log4j-1.2.15.jar  bin 0 -> 391834 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/native/.DS_Store  bin 0 -> 6148 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.a  bin 0 -> 101536 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.la  35
-rw-r--r--  lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so  bin 0 -> 64941 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so.1  bin 0 -> 64941 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so.1.0.0  bin 0 -> 64941 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.a  bin 0 -> 76446 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.la  35
-rw-r--r--  lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so  bin 0 -> 59620 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so.1  bin 0 -> 59620 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so.1.0.0  bin 0 -> 59620 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/oro-2.0.8.jar  bin 0 -> 65261 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/servlet-api-2.5-6.1.14.jar  bin 0 -> 132368 bytes
-rw-r--r--  lib/hadoop-0.20.0/lib/xmlenc-0.52.jar  bin 0 -> 15010 bytes
-rw-r--r--  lib/hadoop-0.20.0/librecordio/librecordio.a  bin 0 -> 2520838 bytes
-rw-r--r--  lib/hadoop-0.20.0/webapps/datanode/WEB-INF/web.xml  40
-rw-r--r--  lib/hadoop-0.20.0/webapps/hdfs/WEB-INF/web.xml  40
-rw-r--r--  lib/hadoop-0.20.0/webapps/hdfs/index.html  20
-rw-r--r--  lib/hadoop-0.20.0/webapps/job/WEB-INF/web.xml  180
-rw-r--r--  lib/hadoop-0.20.0/webapps/job/index.html  20
-rw-r--r--  lib/hadoop-0.20.0/webapps/static/hadoop-logo.jpg  bin 0 -> 9443 bytes
-rw-r--r--  lib/hadoop-0.20.0/webapps/static/hadoop.css  134
-rw-r--r--  lib/hadoop-0.20.0/webapps/static/jobconf.xsl  18
-rw-r--r--  lib/hadoop-0.20.0/webapps/static/jobtracker.js  151
-rw-r--r--  lib/hadoop-0.20.0/webapps/task/WEB-INF/web.xml  20
-rw-r--r--  lib/hadoop-0.20.0/webapps/task/index.html  1
-rw-r--r--  lib/jetty-7.1.6.v20100715/jetty-server-7.1.6.v20100715.jar  bin 0 -> 647178 bytes
-rw-r--r--  lib/jetty-7.1.6.v20100715/servlet-api-2.5.jar  bin 0 -> 105112 bytes
-rw-r--r--  lib/jline.jar  bin 0 -> 87543 bytes
-rw-r--r--  lib/liblzf-3.5/Changes  125
-rw-r--r--  lib/liblzf-3.5/LICENSE  27
-rw-r--r--  lib/liblzf-3.5/Makefile  66
-rw-r--r--  lib/liblzf-3.5/Makefile.in  66
-rw-r--r--  lib/liblzf-3.5/README  29
-rw-r--r--  lib/liblzf-3.5/config.h  17
-rw-r--r--  lib/liblzf-3.5/config.h.in  16
-rw-r--r--  lib/liblzf-3.5/config.log  515
-rwxr-xr-x  lib/liblzf-3.5/config.status  826
-rwxr-xr-x  lib/liblzf-3.5/configure  7871
-rw-r--r--  lib/liblzf-3.5/configure.ac  25
-rw-r--r--  lib/liblzf-3.5/crc32.h  65
-rw-r--r--  lib/liblzf-3.5/cs/CLZF.cs  344
-rw-r--r--  lib/liblzf-3.5/cs/README  7
-rwxr-xr-x  lib/liblzf-3.5/install-sh  251
-rw-r--r--  lib/liblzf-3.5/lzf.c  537
-rw-r--r--  lib/liblzf-3.5/lzf.h  100
-rw-r--r--  lib/liblzf-3.5/lzfP.h  159
-rw-r--r--  lib/liblzf-3.5/lzf_c.c  296
-rw-r--r--  lib/liblzf-3.5/lzf_d.c  148
-rw-r--r--  lib/mesos.jar  bin 0 -> 33618 bytes
-rw-r--r--  lib/scalacheck_2.8.0-1.7.jar  bin 0 -> 745883 bytes
-rw-r--r--  lib/scalatest-1.2/LICENSE  202
-rw-r--r--  lib/scalatest-1.2/NOTICE  7
-rw-r--r--  lib/scalatest-1.2/README.txt  58
-rw-r--r--  lib/scalatest-1.2/scalatest-1.2.jar  bin 0 -> 1784096 bytes
-rw-r--r--  lib/slf4j-1.6.1/slf4j-api-1.6.1.jar  bin 0 -> 25496 bytes
-rw-r--r--  lib/slf4j-1.6.1/slf4j-log4j12-1.6.1.jar  bin 0 -> 9753 bytes
197 files changed, 334234 insertions, 0 deletions
diff --git a/lib/apache-log4j-1.2.16/log4j-1.2.16.jar b/lib/apache-log4j-1.2.16/log4j-1.2.16.jar
new file mode 100644
index 0000000000..3f9d847618
--- /dev/null
+++ b/lib/apache-log4j-1.2.16/log4j-1.2.16.jar
Binary files differ
diff --git a/lib/asm-3.2/.DS_Store b/lib/asm-3.2/.DS_Store
new file mode 100644
index 0000000000..52b0f12a32
--- /dev/null
+++ b/lib/asm-3.2/.DS_Store
Binary files differ
diff --git a/lib/asm-3.2/lib/all/README.txt b/lib/asm-3.2/lib/all/README.txt
new file mode 100644
index 0000000000..d7c96a5edb
--- /dev/null
+++ b/lib/asm-3.2/lib/all/README.txt
@@ -0,0 +1,3 @@
+It is highly recommended to use only the necessary ASM jars for your
+application instead of using the asm-all jar, unless you really need
+all ASM packages.
\ No newline at end of file
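
[Editor's note: to illustrate the README's advice, many bytecode tasks need only the core asm-3.2.jar. A minimal round-trip through the ASM 3.x core API (ClassReader and ClassWriter live in the core jar; the file paths here are hypothetical arguments):

    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;

    import org.objectweb.asm.ClassReader;
    import org.objectweb.asm.ClassWriter;

    // Round-trips a class file using only the core asm-3.2.jar;
    // no asm-all, asm-tree, or asm-util jars are required.
    public class CopyClass {
        public static void main(String[] args) throws IOException {
            ClassReader reader = new ClassReader(new FileInputStream(args[0]));
            ClassWriter writer = new ClassWriter(ClassWriter.COMPUTE_MAXS);
            reader.accept(writer, 0); // pipe the class through unchanged
            FileOutputStream out = new FileOutputStream(args[1]);
            out.write(writer.toByteArray());
            out.close();
        }
    }

Only tools that visit or rebuild class trees (analyzers, tracers) need the asm-tree/asm-util jars the README warns about.]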
diff --git a/lib/asm-3.2/lib/all/asm-all-3.2.jar b/lib/asm-3.2/lib/all/asm-all-3.2.jar
new file mode 100644
index 0000000000..d0ad60ed0a
--- /dev/null
+++ b/lib/asm-3.2/lib/all/asm-all-3.2.jar
Binary files differ
diff --git a/lib/asm-3.2/lib/all/asm-all-3.2.pom b/lib/asm-3.2/lib/all/asm-all-3.2.pom
new file mode 100644
index 0000000000..9899a54c3b
--- /dev/null
+++ b/lib/asm-3.2/lib/all/asm-all-3.2.pom
@@ -0,0 +1,15 @@
+<project>
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>asm</groupId>
+ <artifactId>asm-parent</artifactId>
+ <version>3.2</version>
+ </parent>
+
+ <name>ASM All</name>
+ <groupId>asm</groupId>
+ <artifactId>asm-all</artifactId>
+ <packaging>jar</packaging>
+
+</project>
diff --git a/lib/asm-3.2/lib/all/asm-debug-all-3.2.jar b/lib/asm-3.2/lib/all/asm-debug-all-3.2.jar
new file mode 100644
index 0000000000..94b8549142
--- /dev/null
+++ b/lib/asm-3.2/lib/all/asm-debug-all-3.2.jar
Binary files differ
diff --git a/lib/asm-3.2/lib/all/asm-debug-all-3.2.pom b/lib/asm-3.2/lib/all/asm-debug-all-3.2.pom
new file mode 100644
index 0000000000..9899a54c3b
--- /dev/null
+++ b/lib/asm-3.2/lib/all/asm-debug-all-3.2.pom
@@ -0,0 +1,15 @@
+<project>
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>asm</groupId>
+ <artifactId>asm-parent</artifactId>
+ <version>3.2</version>
+ </parent>
+
+ <name>ASM All</name>
+ <groupId>asm</groupId>
+ <artifactId>asm-all</artifactId>
+ <packaging>jar</packaging>
+
+</project>
diff --git a/lib/asm-3.2/lib/asm-3.2.jar b/lib/asm-3.2/lib/asm-3.2.jar
new file mode 100644
index 0000000000..334e7fdc7f
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-3.2.jar
Binary files differ
diff --git a/lib/asm-3.2/lib/asm-3.2.pom b/lib/asm-3.2/lib/asm-3.2.pom
new file mode 100644
index 0000000000..c714db09b2
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-3.2.pom
@@ -0,0 +1,14 @@
+<project>
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <artifactId>asm-parent</artifactId>
+ <groupId>asm</groupId>
+ <version>3.2</version>
+ </parent>
+
+ <name>ASM Core</name>
+ <artifactId>asm</artifactId>
+ <packaging>jar</packaging>
+
+</project>
diff --git a/lib/asm-3.2/lib/asm-analysis-3.2.jar b/lib/asm-3.2/lib/asm-analysis-3.2.jar
new file mode 100644
index 0000000000..40ee3151cb
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-analysis-3.2.jar
Binary files differ
diff --git a/lib/asm-3.2/lib/asm-analysis-3.2.pom b/lib/asm-3.2/lib/asm-analysis-3.2.pom
new file mode 100644
index 0000000000..b3933387af
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-analysis-3.2.pom
@@ -0,0 +1,21 @@
+<project>
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <artifactId>asm-parent</artifactId>
+ <groupId>asm</groupId>
+ <version>3.2</version>
+ </parent>
+
+ <name>ASM Analysis</name>
+ <artifactId>asm-analysis</artifactId>
+ <packaging>jar</packaging>
+
+ <dependencies>
+ <dependency>
+ <artifactId>asm-tree</artifactId>
+ <groupId>asm</groupId>
+ </dependency>
+ </dependencies>
+
+</project>
diff --git a/lib/asm-3.2/lib/asm-commons-3.2.jar b/lib/asm-3.2/lib/asm-commons-3.2.jar
new file mode 100644
index 0000000000..8dfed0a9b7
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-commons-3.2.jar
Binary files differ
diff --git a/lib/asm-3.2/lib/asm-commons-3.2.pom b/lib/asm-3.2/lib/asm-commons-3.2.pom
new file mode 100644
index 0000000000..8517715b4a
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-commons-3.2.pom
@@ -0,0 +1,21 @@
+<project>
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <artifactId>asm-parent</artifactId>
+ <groupId>asm</groupId>
+ <version>3.2</version>
+ </parent>
+
+ <name>ASM Commons</name>
+ <artifactId>asm-commons</artifactId>
+ <packaging>jar</packaging>
+
+ <dependencies>
+ <dependency>
+ <artifactId>asm-tree</artifactId>
+ <groupId>asm</groupId>
+ </dependency>
+ </dependencies>
+
+</project>
diff --git a/lib/asm-3.2/lib/asm-parent-3.2.pom b/lib/asm-3.2/lib/asm-parent-3.2.pom
new file mode 100644
index 0000000000..c220347f6a
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-parent-3.2.pom
@@ -0,0 +1,136 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+ http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <artifactId>asm-parent</artifactId>
+ <groupId>asm</groupId>
+ <version>3.2</version>
+ <packaging>pom</packaging>
+
+ <name>ASM</name>
+ <description>A very small and fast Java bytecode manipulation framework</description>
+ <url>http://asm.objectweb.org/</url>
+
+ <organization>
+ <name>ObjectWeb</name>
+ <url>http://www.objectweb.org/</url>
+ </organization>
+ <inceptionYear>2000</inceptionYear>
+
+ <licenses>
+ <license>
+ <name>BSD</name>
+ <url>http://asm.objectweb.org/license.html</url>
+ </license>
+ </licenses>
+
+ <developers>
+ <developer>
+ <name>Eric Bruneton</name>
+ <id>ebruneton</id>
+ <email>Eric.Bruneton@rd.francetelecom.com</email>
+ <roles>
+ <role>Creator</role>
+ <role>Java Developer</role>
+ </roles>
+ </developer>
+ <developer>
+ <name>Eugene Kuleshov</name>
+ <id>eu</id>
+ <email>eu@javatx.org</email>
+ <roles>
+ <role>Java Developer</role>
+ </roles>
+ </developer>
+ </developers>
+
+ <scm>
+ <connection>scm:cvs:pserver:anonymous:@cvs.forge.objectweb.org:/cvsroot/asm:asm</connection>
+ <developerConnection>scm:cvs:ext:${maven.username}@cvs.forge.objectweb.org:/cvsroot/asm:asm</developerConnection>
+ <url>http://cvs.forge.objectweb.org/cgi-bin/viewcvs.cgi/asm/asm/</url>
+ </scm>
+
+ <issueManagement>
+ <url>http://forge.objectweb.org/tracker/?group_id=23</url>
+ </issueManagement>
+
+ <dependencyManagement>
+ <dependencies>
+
+ <dependency>
+ <artifactId>asm</artifactId>
+ <groupId>${project.groupId}</groupId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <artifactId>asm-tree</artifactId>
+ <groupId>${project.groupId}</groupId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <artifactId>asm-analysis</artifactId>
+ <groupId>${project.groupId}</groupId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <artifactId>asm-commons</artifactId>
+ <groupId>${project.groupId}</groupId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <artifactId>asm-util</artifactId>
+ <groupId>${project.groupId}</groupId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <artifactId>asm-xml</artifactId>
+ <groupId>${project.groupId}</groupId>
+ <version>${project.version}</version>
+ </dependency>
+
+ </dependencies>
+ </dependencyManagement>
+
+ <mailingLists>
+ <mailingList>
+ <name>ASM Users List</name>
+ <subscribe>sympa@ow2.org?subject=subscribe%20asm</subscribe>
+ <unsubscribe>sympa@ow2.org?subject=unsubscribe%20asm</unsubscribe>
+ <post>asm@ow2.org</post>
+ <archive>http://www.ow2.org/wws/arc/asm</archive>
+ </mailingList>
+ <mailingList>
+ <name>ASM Team List</name>
+ <subscribe>sympa@ow2.org?subject=subscribe%20asm-team</subscribe>
+ <unsubscribe>sympa@ow2.org?subject=unsubscribe%20asm-team</unsubscribe>
+ <post>asm-team@ow2.org</post>
+ <archive>http://www.ow2.org/wws/arc/asm-team</archive>
+ </mailingList>
+ </mailingLists>
+
+ <distributionManagement>
+ <downloadUrl>http://mojo.codehaus.org/my-project</downloadUrl>
+ <repository>
+ <id>objectweb</id>
+ <uniqueVersion>false</uniqueVersion>
+ <name>ObjectWeb Maven 2.0 Repository</name>
+ <url>dav:https://maven.forge.objectweb.org:8002/maven2/</url>
+ <layout>default</layout>
+ </repository>
+ <snapshotRepository>
+ <id>objectweb.snapshots</id>
+ <uniqueVersion>false</uniqueVersion>
+ <name>ObjectWeb Maven 2.0 Snapshot Repository</name>
+ <url>dav:https://maven.forge.objectweb.org:8002/maven2-snapshot/</url>
+ <layout>default</layout>
+ </snapshotRepository>
+ </distributionManagement>
+
+</project>
diff --git a/lib/asm-3.2/lib/asm-tree-3.2.jar b/lib/asm-3.2/lib/asm-tree-3.2.jar
new file mode 100644
index 0000000000..b21fb86a92
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-tree-3.2.jar
Binary files differ
diff --git a/lib/asm-3.2/lib/asm-tree-3.2.pom b/lib/asm-3.2/lib/asm-tree-3.2.pom
new file mode 100644
index 0000000000..9f454528f4
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-tree-3.2.pom
@@ -0,0 +1,21 @@
+<project>
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <artifactId>asm-parent</artifactId>
+ <groupId>asm</groupId>
+ <version>3.2</version>
+ </parent>
+
+ <name>ASM Tree</name>
+ <artifactId>asm-tree</artifactId>
+ <packaging>jar</packaging>
+
+ <dependencies>
+ <dependency>
+ <artifactId>asm</artifactId>
+ <groupId>asm</groupId>
+ </dependency>
+ </dependencies>
+
+</project>
diff --git a/lib/asm-3.2/lib/asm-util-3.2.jar b/lib/asm-3.2/lib/asm-util-3.2.jar
new file mode 100644
index 0000000000..499d229034
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-util-3.2.jar
Binary files differ
diff --git a/lib/asm-3.2/lib/asm-util-3.2.pom b/lib/asm-3.2/lib/asm-util-3.2.pom
new file mode 100644
index 0000000000..e302b0f356
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-util-3.2.pom
@@ -0,0 +1,21 @@
+<project>
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <artifactId>asm-parent</artifactId>
+ <groupId>asm</groupId>
+ <version>3.2</version>
+ </parent>
+
+ <name>ASM Util</name>
+ <artifactId>asm-util</artifactId>
+ <packaging>jar</packaging>
+
+ <dependencies>
+ <dependency>
+ <artifactId>asm-tree</artifactId>
+ <groupId>asm</groupId>
+ </dependency>
+ </dependencies>
+
+</project>
diff --git a/lib/asm-3.2/lib/asm-xml-3.2.jar b/lib/asm-3.2/lib/asm-xml-3.2.jar
new file mode 100644
index 0000000000..31b31b56fe
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-xml-3.2.jar
Binary files differ
diff --git a/lib/asm-3.2/lib/asm-xml-3.2.pom b/lib/asm-3.2/lib/asm-xml-3.2.pom
new file mode 100644
index 0000000000..0f3de1f2ab
--- /dev/null
+++ b/lib/asm-3.2/lib/asm-xml-3.2.pom
@@ -0,0 +1,21 @@
+<project>
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <artifactId>asm-parent</artifactId>
+ <groupId>asm</groupId>
+ <version>3.2</version>
+ </parent>
+
+ <name>ASM XML</name>
+ <artifactId>asm-xml</artifactId>
+ <packaging>jar</packaging>
+
+ <dependencies>
+ <dependency>
+ <artifactId>asm-util</artifactId>
+ <groupId>asm</groupId>
+ </dependency>
+ </dependencies>
+
+</project>
diff --git a/lib/colt.jar b/lib/colt.jar
new file mode 100644
index 0000000000..a7192f68b3
--- /dev/null
+++ b/lib/colt.jar
Binary files differ
diff --git a/lib/guava-r07/COPYING b/lib/guava-r07/COPYING
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/lib/guava-r07/COPYING
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/lib/guava-r07/README b/lib/guava-r07/README
new file mode 100644
index 0000000000..a0e832dd54
--- /dev/null
+++ b/lib/guava-r07/README
@@ -0,0 +1,28 @@
+Guava: Google Core Libraries for Java
+
+Requires JDK 5 or higher.
+
+Project page:
+ http://guava-libraries.googlecode.com
+
+Ask "how-to" and "why-didn't-it-work" questions at:
+ http://www.stackoverflow.com/questions/ask
+ (use the "guava" tag so we'll see it)
+
+Ask discussion questions at:
+ http://groups.google.com/group/guava-discuss
+
+Subscribe to project updates in your feed reader:
+ http://code.google.com/feeds/p/guava-libraries/updates/basic
+
+Warnings:
+
+All APIs marked @Beta at the class or method level are subject to
+change. If your code is a library or framework that users outside
+your control will include on their classpath, do not use @Beta
+APIs (at least without repackaging them somehow).
+
+Serialized forms of ALL objects are subject to change. Do not
+persist these and assume they can be read by a future version of
+the library.
+
diff --git a/lib/guava-r07/guava-r07.jar b/lib/guava-r07/guava-r07.jar
new file mode 100644
index 0000000000..a6c9ce02df
--- /dev/null
+++ b/lib/guava-r07/guava-r07.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/.DS_Store b/lib/hadoop-0.20.0/.DS_Store
new file mode 100644
index 0000000000..81f4e05e09
--- /dev/null
+++ b/lib/hadoop-0.20.0/.DS_Store
Binary files differ
diff --git a/lib/hadoop-0.20.0/CHANGES.txt b/lib/hadoop-0.20.0/CHANGES.txt
new file mode 100644
index 0000000000..95c8b5c08b
--- /dev/null
+++ b/lib/hadoop-0.20.0/CHANGES.txt
@@ -0,0 +1,8288 @@
+Hadoop Change Log
+
+Release 0.20.0 - 2009-04-15
+
+ INCOMPATIBLE CHANGES
+
+ HADOOP-4210. Fix findbugs warnings for equals implementations of mapred ID
+ classes. Removed public, static ID::read and ID::forName; made ID an
+ abstract class. (Suresh Srinivas via cdouglas)
+
+ HADOOP-4253. Fix various warnings generated by findbugs.
+ Following deprecated methods in RawLocalFileSystem are removed:
+ public String getName()
+ public void lock(Path p, boolean shared)
+ public void release(Path p)
+ (Suresh Srinivas via johan)
+
+ HADOOP-4618. Move http server from FSNamesystem into NameNode.
+ FSNamesystem.getNameNodeInfoPort() is removed.
+ FSNamesystem.getDFSNameNodeMachine() and FSNamesystem.getDFSNameNodePort()
+ replaced by FSNamesystem.getDFSNameNodeAddress().
+ NameNode(bindAddress, conf) is removed.
+ (shv)
+
+ HADOOP-4567. GetFileBlockLocations returns the NetworkTopology
+ information of the machines where the blocks reside. (dhruba)
+
+ HADOOP-4435. The JobTracker WebUI displays the amount of heap memory
+ in use. (dhruba)
+
+ HADOOP-4628. Move Hive into a standalone subproject. (omalley)
+
+ HADOOP-4188. Removes task's dependency on concrete filesystems.
+ (Sharad Agarwal via ddas)
+
+ HADOOP-1650. Upgrade to Jetty 6. (cdouglas)
+
+ HADOOP-3986. Remove static Configuration from JobClient. (Amareshwari
+ Sriramadasu via cdouglas)
+ JobClient::setCommandLineConfig is removed
+ JobClient::getCommandLineConfig is removed
+ JobShell, TestJobShell classes are removed
+
+ HADOOP-4422. S3 file systems should not create bucket.
+ (David Phillips via tomwhite)
+
+ HADOOP-4035. Support memory based scheduling in capacity scheduler.
+ (Vinod Kumar Vavilapalli via yhemanth)
+
+ HADOOP-3497. Fix bug in overly restrictive file globbing with a
+ PathFilter. (tomwhite)
+
+ HADOOP-4445. Replace running task counts with running task
+ percentage in capacity scheduler UI. (Sreekanth Ramakrishnan via
+ yhemanth)
+
+ HADOOP-4631. Splits the configuration into three parts - one for core,
+ one for mapred and the last one for HDFS. (Sharad Agarwal via cdouglas)
+
+ HADOOP-3344. Fix libhdfs build to use autoconf and build the same
+ architecture (32 vs 64 bit) of the JVM running Ant. The libraries for
+ pipes, utils, and libhdfs are now all in c++/<os_osarch_jvmdatamodel>/lib.
+ (Giridharan Kesavan via nigel)
+
+ HADOOP-4874. Remove LZO codec because of licensing issues. (omalley)
+
+ HADOOP-4970. The full path name of a file is preserved inside Trash.
+ (Prasad Chakka via dhruba)
+
+ HADOOP-4103. NameNode keeps a count of missing blocks. It warns on
+ WebUI if there are such blocks. '-report' and '-metaSave' have extra
+ info to track such blocks. (Raghu Angadi)
+
+ HADOOP-4783. Change permissions on history files on the jobtracker
+ to be only group readable instead of world readable.
+ (Amareshwari Sriramadasu via yhemanth)
+
+ HADOOP-5531. Removed Chukwa from Hadoop 0.20.0. (nigel)
+
+ NEW FEATURES
+
+ HADOOP-4575. Add a proxy service for relaying HsftpFileSystem requests.
+ Includes client authentication via user certificates and config-based
+ access control. (Kan Zhang via cdouglas)
+
+ HADOOP-4661. Add DistCh, a new tool for distributed ch{mod,own,grp}.
+ (szetszwo)
+
+ HADOOP-4709. Add several new features and bug fixes to Chukwa.
+ Added Hadoop Infrastructure Care Center (UI for visualize data collected
+ by Chukwa)
+ Added FileAdaptor for streaming small file in one chunk
+ Added compression to archive and demux output
+ Added unit tests and validation for agent, collector, and demux map
+ reduce job
+ Added database loader for loading demux output (sequence file) to jdbc
+ connected database
+ Added algorithm to distribute collector load more evenly
+ (Jerome Boulon, Eric Yang, Andy Konwinski, Ariel Rabkin via cdouglas)
+
+ HADOOP-4179. Add Vaidya tool to analyze map/reduce job logs for performance
+ problems. (Suhas Gogate via omalley)
+
+ HADOOP-4029. Add NameNode storage information to the dfshealth page and
+ move DataNode information to a separated page. (Boris Shkolnik via
+ szetszwo)
+
+ HADOOP-4348. Add service-level authorization for Hadoop. (acmurthy)
+
+ HADOOP-4826. Introduce admin command saveNamespace. (shv)
+
+ HADOOP-3063 BloomMapFile - fail-fast version of MapFile for sparsely
+ populated key space (Andrzej Bialecki via stack)
+
+ HADOOP-1230. Add new map/reduce API and deprecate the old one. Generally,
+ the old code should work without problem. The new api is in
+ org.apache.hadoop.mapreduce and the old classes in org.apache.hadoop.mapred
+ are deprecated. Differences in the new API:
+ 1. All of the methods take Context objects that allow us to add new
+ methods without breaking compatibility.
+ 2. Mapper and Reducer now have a "run" method that is called once and
+ contains the control loop for the task, which lets applications
+ replace it.
+ 3. Mapper and Reducer by default are Identity Mapper and Reducer.
+ 4. The FileOutputFormats use part-r-00000 for the output of reduce 0 and
+ part-m-00000 for the output of map 0.
+ 5. The reduce grouping comparator now uses the raw compare instead of
+ object compare.
+ 6. The number of maps in FileInputFormat is controlled by min and max
+ split size rather than min size and the desired number of maps.
+ (omalley)
+
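
[Editor's note: a minimal sketch (not from this patch) of a mapper against the new org.apache.hadoop.mapreduce API that HADOOP-1230 describes above; the Context parameter and the overridable run() control loop are the visible differences from the old org.apache.hadoop.mapred interface:

    import java.io.IOException;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    // New-style mapper: no OutputCollector/Reporter pair;
    // all input, output, and status flow through the Context object.
    public class LineLengthMapper
        extends Mapper<LongWritable, Text, Text, IntWritable> {

      @Override
      protected void map(LongWritable offset, Text line, Context context)
          throws IOException, InterruptedException {
        context.write(new Text("length"), new IntWritable(line.getLength()));
      }

      // run() is the per-task control loop; the default implementation is
      // roughly the body below, and applications may now override it.
      @Override
      public void run(Context context) throws IOException, InterruptedException {
        setup(context);
        while (context.nextKeyValue()) {
          map(context.getCurrentKey(), context.getCurrentValue(), context);
        }
        cleanup(context);
      }
    }
]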
+ HADOOP-3305. Use Ivy to manage dependencies. (Giridharan Kesavan
+ and Steve Loughran via cutting)
+
+ IMPROVEMENTS
+
+ HADOOP-4565. Added CombineFileInputFormat to use data locality information
+ to create splits. (dhruba via zshao)
+
+ HADOOP-4749. Added a new counter REDUCE_INPUT_BYTES. (Yongqiang He via
+ zshao)
+
+ HADOOP-4234. Fix KFS "glue" layer to allow applications to interface
+ with multiple KFS metaservers. (Sriram Rao via lohit)
+
+ HADOOP-4245. Update to latest version of KFS "glue" library jar.
+ (Sriram Rao via lohit)
+
+ HADOOP-4244. Change test-patch.sh to check Eclipse classpath no matter
+ it is run by Hudson or not. (szetszwo)
+
+ HADOOP-3180. Add name of missing class to WritableName.getClass
+ IOException. (Pete Wyckoff via omalley)
+
+ HADOOP-4178. Make the capacity scheduler's default values configurable.
+ (Sreekanth Ramakrishnan via omalley)
+
+ HADOOP-4262. Generate better error message when client exception has null
+ message. (stevel via omalley)
+
+ HADOOP-4226. Refactor and document LineReader to make it more readily
+ understandable. (Yuri Pradkin via cdouglas)
+
+ HADOOP-4238. When listing jobs, if scheduling information isn't available
+ print NA instead of empty output. (Sreekanth Ramakrishnan via johan)
+
+ HADOOP-4284. Support filters that apply to all requests, or global filters,
+ to HttpServer. (Kan Zhang via cdouglas)
+
+ HADOOP-4276. Improve the hashing functions and deserialization of the
+ mapred ID classes. (omalley)
+
+ HADOOP-4485. Add a compile-native ant task, as a shorthand. (enis)
+
+ HADOOP-4454. Allow # comments in slaves file. (Rama Ramasamy via omalley)
+
+ HADOOP-3461. Remove hdfs.StringBytesWritable. (szetszwo)
+
+ HADOOP-4437. Use Halton sequence instead of java.util.Random in
+ PiEstimator. (szetszwo)
+
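
[Editor's note: HADOOP-4437 above replaces java.util.Random with a Halton sequence, a deterministic low-discrepancy sequence. A hedged illustration of the underlying idea, using the textbook radical-inverse construction rather than PiEstimator's actual code:

    // i-th element of the Halton sequence in the given base: write i in
    // that base and mirror its digits around the radix point.
    public class Halton {
      static double halton(int i, int base) {
        double result = 0.0;
        double f = 1.0 / base;
        while (i > 0) {
          result += f * (i % base); // next digit, scaled into (0, 1)
          i /= base;
          f /= base;
        }
        return result;
      }

      public static void main(String[] args) {
        // Pairs (halton(i, 2), halton(i, 3)) cover the unit square more
        // evenly than pseudo-random points, reducing the variance of a
        // Monte Carlo pi estimate.
        for (int i = 1; i <= 5; i++) {
          System.out.printf("%.4f %.4f%n", halton(i, 2), halton(i, 3));
        }
      }
    }
]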
+ HADOOP-4572. Change INode and its sub-classes to package private.
+ (szetszwo)
+
+ HADOOP-4187. Does a runtime lookup for JobConf/JobConfigurable, and if
+ found, invokes the appropriate configure method. (Sharad Agarwal via ddas)
+
+ HADOOP-4453. Improve ssl configuration and handling in HsftpFileSystem,
+ particularly when used with DistCp. (Kan Zhang via cdouglas)
+
+ HADOOP-4583. Several code optimizations in HDFS. (Suresh Srinivas via
+ szetszwo)
+
+ HADOOP-3923. Remove org.apache.hadoop.mapred.StatusHttpServer. (szetszwo)
+
+ HADOOP-4622. Explicitly specify interpretor for non-native
+ pipes binaries. (Fredrik Hedberg via johan)
+
+ HADOOP-4505. Add a unit test to test faulty setup task and cleanup
+ task killing the job. (Amareshwari Sriramadasu via johan)
+
+ HADOOP-4608. Don't print a stack trace when the example driver gets an
+ unknown program to run. (Edward Yoon via omalley)
+
+ HADOOP-4645. Package HdfsProxy contrib project without the extra level
+ of directories. (Kan Zhang via omalley)
+
+ HADOOP-4126. Allow access to HDFS web UI on EC2 (tomwhite via omalley)
+
+ HADOOP-4612. Removes RunJar's dependency on JobClient.
+ (Sharad Agarwal via ddas)
+
+ HADOOP-4185. Adds setVerifyChecksum() method to FileSystem.
+ (Sharad Agarwal via ddas)
+
+ HADOOP-4523. Prevent too many tasks scheduled on a node from bringing
+ it down by monitoring for cumulative memory usage across tasks.
+ (Vinod Kumar Vavilapalli via yhemanth)
+
+ HADOOP-4640. Adds an input format that can split lzo compressed
+ text files. (johan)
+
+ HADOOP-4666. Launch reduces only after a few maps have run in the
+ Fair Scheduler. (Matei Zaharia via johan)
+
+ HADOOP-4339. Remove redundant calls from FileSystem/FsShell when
+ generating/processing ContentSummary. (David Phillips via cdouglas)
+
+ HADOOP-2774. Add counters tracking records spilled to disk in MapTask and
+ ReduceTask. (Ravi Gummadi via cdouglas)
+
+ HADOOP-4513. Initialize jobs asynchronously in the capacity scheduler.
+ (Sreekanth Ramakrishnan via yhemanth)
+
+ HADOOP-4649. Improve abstraction for spill indices. (cdouglas)
+
+ HADOOP-3770. Add gridmix2, an iteration on the gridmix benchmark. (Runping
+ Qi via cdouglas)
+
+ HADOOP-4708. Add support for dfsadmin commands in TestCLI. (Boris Shkolnik
+ via cdouglas)
+
+ HADOOP-4758. Add a splitter for metrics contexts to support more than one
+ type of collector. (cdouglas)
+
+ HADOOP-4722. Add tests for dfsadmin quota error messages. (Boris Shkolnik
+ via cdouglas)
+
+ HADOOP-4690. fuse-dfs - create source file/function + utils + config +
+ main source files. (pete wyckoff via mahadev)
+
+ HADOOP-3750. Fix and enforce module dependencies. (Sharad Agarwal via
+ tomwhite)
+
+ HADOOP-4747. Speed up FsShell::ls by removing redundant calls to the
+ filesystem. (David Phillips via cdouglas)
+
+ HADOOP-4305. Improves the blacklisting strategy, whereby, tasktrackers
+ that are blacklisted are not given tasks to run from other jobs, subject
+ to the following conditions (all must be met):
+ 1) The TaskTracker has been blacklisted by at least 4 jobs (configurable)
+ 2) The TaskTracker has been blacklisted 50% more times than
+ the average (configurable)
+ 3) The cluster has less than 50% trackers blacklisted
+ Once in 24 hours, a TaskTracker blacklisted for all jobs is given a chance.
+ Restarting the TaskTracker moves it out of the blacklist.
+ (Amareshwari Sriramadasu via ddas)
+
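
[Editor's note: a sketch of the HADOOP-4305 decision rule exactly as the entry above states it; the class, method, and field names here are hypothetical, not the patch's actual identifiers:

    // Illustrative restatement of the three blacklisting conditions above;
    // all names and thresholds are taken from the changelog entry.
    class BlacklistPolicy {
      boolean shouldBlacklistAcrossJobs(int jobsThatBlacklistedTracker,
                                        double avgBlacklistsPerTracker,
                                        int blacklistedTrackers,
                                        int totalTrackers) {
        boolean enoughJobs = jobsThatBlacklistedTracker >= 4;           // condition 1
        boolean wellAboveAverage =
            jobsThatBlacklistedTracker > 1.5 * avgBlacklistsPerTracker; // condition 2
        boolean clusterHealthy =
            blacklistedTrackers < 0.5 * totalTrackers;                  // condition 3
        return enoughJobs && wellAboveAverage && clusterHealthy;        // all must hold
      }
    }
]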
+ HADOOP-4688. Modify the MiniMRDFSSort unit test to spill multiple times,
+ exercising the map-side merge code. (cdouglas)
+
+ HADOOP-4737. Adds the KILLED notification when jobs get killed.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4728. Add a test exercising different namenode configurations.
+ (Boris Shkolnik via cdouglas)
+
+ HADOOP-4807. Adds JobClient commands to get the active/blacklisted tracker
+ names. Also adds commands to display running/completed task attempt IDs.
+ (ddas)
+
+ HADOOP-4699. Remove checksum validation from map output servlet. (cdouglas)
+
+ HADOOP-4838. Added a registry to automate metrics and mbeans management.
+ (Sanjay Radia via acmurthy)
+
+ HADOOP-3136. Fixed the default scheduler to assign multiple tasks to each
+ tasktracker per heartbeat, when feasible. To ensure locality isn't hurt
+ too badly, the scheduler will not assign more than one off-switch task per
+ heartbeat. The heartbeat interval is also halved since the task-tracker is
+ fixed to no longer send out heartbeats on each task completion. A
+ slow-start for scheduling reduces is introduced to ensure that reduces
+ aren't started till sufficient number of maps are done, else reduces of
+ jobs whose maps aren't scheduled might swamp the cluster.
+ Configuration changes to mapred-default.xml:
+ add mapred.reduce.slowstart.completed.maps
+ (acmurthy)
+
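
[Editor's note: the slow-start knob that HADOOP-3136 adds above can also be set per job; a minimal sketch using the 0.20-era JobConf API, where the 0.8 value is an arbitrary example:

    import org.apache.hadoop.mapred.JobConf;

    public class SlowStartExample {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // Do not schedule reduces until 80% of this job's maps have
        // completed (property added by HADOOP-3136; default lives in
        // mapred-default.xml).
        conf.setFloat("mapred.reduce.slowstart.completed.maps", 0.8f);
        System.out.println(conf.get("mapred.reduce.slowstart.completed.maps"));
      }
    }
]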
+ HADOOP-4545. Add example and test case of secondary sort for the reduce.
+ (omalley)
+
+ HADOOP-4753. Refactor gridmix2 to reduce code duplication. (cdouglas)
+
+ HADOOP-4909. Fix Javadoc and make some of the API more consistent in their
+ use of the JobContext instead of Configuration. (omalley)
+
+ HADOOP-4830. Add end-to-end test cases for testing queue capacities.
+ (Vinod Kumar Vavilapalli via yhemanth)
+
+ HADOOP-4980. Improve code layout of capacity scheduler to make it
+ easier to fix some blocker bugs. (Vivek Ratan via yhemanth)
+
+ HADOOP-4916. Make user/location of Chukwa installation configurable by an
+ external properties file. (Eric Yang via cdouglas)
+
+ HADOOP-4950. Make the CompressorStream, DecompressorStream,
+ BlockCompressorStream, and BlockDecompressorStream public to facilitate
+ non-Hadoop codecs. (omalley)
+
+ HADOOP-4843. Collect job history and configuration in Chukwa. (Eric Yang
+ via cdouglas)
+
+ HADOOP-5030. Build Chukwa RPM to install into configured directory. (Eric
+ Yang via cdouglas)
+
+ HADOOP-4828. Updates documents to do with configuration (HADOOP-4631).
+ (Sharad Agarwal via ddas)
+
+ HADOOP-4939. Adds a test that would inject random failures for tasks in
+ large jobs and would also inject TaskTracker failures. (ddas)
+
+ HADOOP-4920. Stop storing Forrest output in Subversion. (cutting)
+
+ HADOOP-4944. A configuration file can include other configuration
+ files. (Rama Ramasamy via dhruba)
+
+ HADOOP-4804. Provide Forrest documentation for the Fair Scheduler.
+ (Sreekanth Ramakrishnan via yhemanth)
+
+ HADOOP-5248. A testcase that checks for the existence of job directory
+ after the job completes. Fails if it exists. (ddas)
+
+ HADOOP-4664. Introduces multiple job initialization threads, where the
+ number of threads are configurable via mapred.jobinit.threads.
+ (Matei Zaharia and Jothi Padmanabhan via ddas)
+
+ HADOOP-4191. Adds a testcase for JobHistory. (Ravi Gummadi via ddas)
+
+ HADOOP-5466. Change documenation CSS style for headers and code. (Corinne
+ Chandel via szetszwo)
+
+ HADOOP-5275. Add ivy directory and files to built tar.
+ (Giridharan Kesavan via nigel)
+
+ HADOOP-5468. Add sub-menus to forrest documentation and make some minor
+ edits. (Corinne Chandel via szetszwo)
+
+ HADOOP-5437. Fix TestMiniMRDFSSort to properly test jvm-reuse. (omalley)
+
+ HADOOP-5521. Removes dependency of TestJobInProgress on RESTART_COUNT
+ JobHistory tag. (Ravi Gummadi via ddas)
+
+ OPTIMIZATIONS
+
+ HADOOP-3293. Fixes FileInputFormat to provide locations for splits
+ based on the rack/host that has the most number of bytes.
+ (Jothi Padmanabhan via ddas)
+
+ HADOOP-4683. Fixes Reduce shuffle scheduler to invoke
+ getMapCompletionEvents in a separate thread. (Jothi Padmanabhan
+ via ddas)
+
+ BUG FIXES
+
+ HADOOP-5379. CBZip2InputStream to throw IOException on data crc error.
+ (Rodrigo Schmidt via zshao)
+
+ HADOOP-5326. Fixes CBZip2OutputStream data corruption problem.
+ (Rodrigo Schmidt via zshao)
+
+ HADOOP-4204. Fix findbugs warnings related to unused variables, naive
+ Number subclass instantiation, Map iteration, and badly scoped inner
+ classes. (Suresh Srinivas via cdouglas)
+
+ HADOOP-4207. Update derby jar file to release 10.4.2 release.
+ (Prasad Chakka via dhruba)
+
+ HADOOP-4325. SocketInputStream.read() should return -1 in case EOF.
+ (Raghu Angadi)
+
+ HADOOP-4408. FsAction functions need not create new objects. (cdouglas)
+
+ HADOOP-4440. TestJobInProgressListener tests for jobs killed in queued
+ state (Amar Kamat via ddas)
+
+ HADOOP-4346. Implement blocking connect so that Hadoop is not affected
+ by selector problem with JDK default implementation. (Raghu Angadi)
+
+ HADOOP-4388. If there are invalid blocks in the transfer list, Datanode
+ should handle them and keep transferring the remaining blocks. (Suresh
+ Srinivas via szetszwo)
+
+ HADOOP-4587. Fix a typo in Mapper javadoc. (Koji Noguchi via szetszwo)
+
+ HADOOP-4530. In fsck, HttpServletResponse sendError fails with
+ IllegalStateException. (hairong)
+
+ HADOOP-4377. Fix a race condition in directory creation in
+ NativeS3FileSystem. (David Phillips via cdouglas)
+
+ HADOOP-4621. Fix javadoc warnings caused by duplicate jars. (Kan Zhang via
+ cdouglas)
+
+ HADOOP-4566. Deploy new hive code to support more types.
+ (Zheng Shao via dhruba)
+
+ HADOOP-4571. Add chukwa conf files to svn:ignore list. (Eric Yang via
+ szetszwo)
+
+ HADOOP-4589. Correct PiEstimator output messages and improve the code
+ readability. (szetszwo)
+
+ HADOOP-4650. Correct a mismatch between the default value of
+ local.cache.size in the config and the source. (Jeff Hammerbacher via
+ cdouglas)
+
+ HADOOP-4606. Fix cygpath error if the log directory does not exist.
+ (szetszwo via omalley)
+
+ HADOOP-4141. Fix bug in ScriptBasedMapping causing potential infinite
+ loop on misconfigured hadoop-site. (Aaron Kimball via tomwhite)
+
+ HADOOP-4691. Correct a link in the javadoc of IndexedSortable. (szetszwo)
+
+ HADOOP-4598. '-setrep' command skips under-replicated blocks. (hairong)
+
+ HADOOP-4429. Set defaults for user, group in UnixUserGroupInformation so
+ login fails more predictably when misconfigured. (Alex Loddengaard via
+ cdouglas)
+
+ HADOOP-4676. Fix broken URL in blacklisted tasktrackers page. (Amareshwari
+ Sriramadasu via cdouglas)
+
+ HADOOP-3422 Ganglia counter metrics are all reported with the metric
+ name "value", so the counter values can not be seen. (Jason Attributor
+ and Brian Bockelman via stack)
+
+ HADOOP-4704. Fix javadoc typos "the the". (szetszwo)
+
+ HADOOP-4677. Fix semantics of FileSystem::getBlockLocations to return
+ meaningful values. (Hong Tang via cdouglas)
+
+ HADOOP-4669. Use correct operator when evaluating whether access time is
+ enabled (Dhruba Borthakur via cdouglas)
+
+ HADOOP-4732. Pass connection and read timeouts in the correct order when
+ setting up fetch in reduce. (Amareshwari Sriramadasu via cdouglas)
+
+ HADOOP-4558. Fix capacity reclamation in capacity scheduler.
+ (Amar Kamat via yhemanth)
+
+ HADOOP-4770. Fix rungridmix_2 script to work with RunJar. (cdouglas)
+
+ HADOOP-4738. When using git, the saveVersion script will use only the
+ commit hash for the version and not the message, which requires escaping.
+ (cdouglas)
+
+ HADOOP-4576. Show pending job count instead of task count in the UI per
+ queue in capacity scheduler. (Sreekanth Ramakrishnan via yhemanth)
+
+ HADOOP-4623. Maintain running tasks even if speculative execution is off.
+ (Amar Kamat via yhemanth)
+
+ HADOOP-4786. Fix broken compilation error in
+ TestTrackerBlacklistAcrossJobs. (yhemanth)
+
+ HADOOP-4785. Fixes the JobTracker heartbeat to not make two calls to
+ System.currentTimeMillis(). (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4792. Add generated Chukwa configuration files to version control
+ ignore lists. (cdouglas)
+
+ HADOOP-4796. Fix Chukwa test configuration, remove unused components. (Eric
+ Yang via cdouglas)
+
+ HADOOP-4708. Add binaries missed in the initial checkin for Chukwa. (Eric
+ Yang via cdouglas)
+
+ HADOOP-4805. Remove black list collector from Chukwa Agent HTTP Sender.
+ (Eric Yang via cdouglas)
+
+ HADOOP-4837. Move HADOOP_CONF_DIR configuration to chukwa-env.sh (Jerome
+ Boulon via cdouglas)
+
+ HADOOP-4825. Use ps instead of jps for querying process status in Chukwa.
+ (Eric Yang via cdouglas)
+
+ HADOOP-4844. Fixed javadoc for
+ org.apache.hadoop.fs.permission.AccessControlException to document that
+ it's deprecated in favour of
+ org.apache.hadoop.security.AccessControlException. (acmurthy)
+
+ HADOOP-4706. Close the underlying output stream in
+ IFileOutputStream::close. (Jothi Padmanabhan via cdouglas)
+
+ HADOOP-4855. Fixed command-specific help messages for refreshServiceAcl in
+ DFSAdmin and MRAdmin. (acmurthy)
+
+ HADOOP-4820. Remove unused method FSNamesystem::deleteInSafeMode. (Suresh
+ Srinivas via cdouglas)
+
+ HADOOP-4698. Lower io.sort.mb to 10 in the tests and raise the junit memory
+ limit to 512m from 256m. (Nigel Daley via cdouglas)
+
+ HADOOP-4860. Split TestFileTailingAdapters into three separate tests to
+ avoid contention. (Eric Yang via cdouglas)
+
+ HADOOP-3921. Fixed clover (code coverage) target to work with JDK 6.
+ (tomwhite via nigel)
+
+ HADOOP-4845. Modify the reduce input byte counter to record only the
+ compressed size and add a human-readable label. (Yongqiang He via cdouglas)
+
+ HADOOP-4458. Add a test creating symlinks in the working directory.
+ (Amareshwari Sriramadasu via cdouglas)
+
+ HADOOP-4879. Fix org.apache.hadoop.mapred.Counters to correctly define
+ Object.equals rather than depend on contentEquals api. (omalley via
+ acmurthy)
+
+ HADOOP-4791. Fix rpm build process for Chukwa. (Eric Yang via cdouglas)
+
+ HADOOP-4771. Correct initialization of the file count for directories
+ with quotas. (Ruyue Ma via shv)
+
+ HADOOP-4878. Fix eclipse plugin classpath file to point to ivy's resolved
+ lib directory and added the same to test-patch.sh. (Giridharan Kesavan via
+ acmurthy)
+
+ HADOOP-4774. Fix default values of some capacity scheduler configuration
+ items which would otherwise not work on a fresh checkout.
+ (Sreekanth Ramakrishnan via yhemanth)
+
+ HADOOP-4876. Fix capacity scheduler reclamation by updating count of
+ pending tasks correctly. (Sreekanth Ramakrishnan via yhemanth)
+
+ HADOOP-4849. Documentation for Service Level Authorization implemented in
+ HADOOP-4348. (acmurthy)
+
+ HADOOP-4827. Replace Consolidator with Aggregator macros in Chukwa (Eric
+ Yang via cdouglas)
+
+ HADOOP-4894. Correctly parse ps output in Chukwa jettyCollector.sh. (Ari
+ Rabkin via cdouglas)
+
+ HADOOP-4892. Close fds out of Chukwa ExecPlugin. (Ari Rabkin via cdouglas)
+
+ HADOOP-4889. Fix permissions in RPM packaging. (Eric Yang via cdouglas)
+
+ HADOOP-4869. Fixes the TT-JT heartbeat to have an explicit flag for
+ restart, in addition to the initialContact flag that existed earlier.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4716. Fixes ReduceTask.java to clear out the mapping between
+ hosts and MapOutputLocation upon a JT restart (Amar Kamat via ddas)
+
+ HADOOP-4880. Removes an unnecessary testcase from TestJobTrackerRestart.
+ (Amar Kamat via ddas)
+
+ HADOOP-4924. Fixes a race condition in TaskTracker re-init. (ddas)
+
+ HADOOP-4854. Read reclaim capacity interval from capacity scheduler
+ configuration. (Sreekanth Ramakrishnan via yhemanth)
+
+ HADOOP-4896. HDFS Fsck does not load HDFS configuration. (Raghu Angadi)
+
+ HADOOP-4956. Creates TaskStatus for failed tasks with an empty Counters
+ object instead of null. (ddas)
+
+ HADOOP-4979. Fix capacity scheduler to block cluster for failed high
+ RAM requirements across task types. (Vivek Ratan via yhemanth)
+
+ HADOOP-4949. Fix native compilation. (Chris Douglas via acmurthy)
+
+ HADOOP-4787. Fixes the testcase TestTrackerBlacklistAcrossJobs which was
+ earlier failing randomly. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4914. Add description fields to Chukwa init.d scripts (Eric Yang via
+ cdouglas)
+
+ HADOOP-4884. Make tool tip date format match standard HICC format. (Eric
+ Yang via cdouglas)
+
+ HADOOP-4925. Make Chukwa sender properties configurable. (Ari Rabkin via
+ cdouglas)
+
+ HADOOP-4947. Make Chukwa command parsing more forgiving of whitespace. (Ari
+ Rabkin via cdouglas)
+
+ HADOOP-5026. Make chukwa/bin scripts executable in repository. (Andy
+ Konwinski via cdouglas)
+
+ HADOOP-4977. Fix a deadlock between the reclaimCapacity and assignTasks
+ in capacity scheduler. (Vivek Ratan via yhemanth)
+
+ HADOOP-4988. Fix reclaim capacity to work even when there are queues with
+ no capacity. (Vivek Ratan via yhemanth)
+
+ HADOOP-5065. Remove generic parameters from argument to
+ setIn/OutputFormatClass so that it works with SequenceIn/OutputFormat.
+ (cdouglas via omalley)
+
+ HADOOP-4818. Pass user config to instrumentation API. (Eric Yang via
+ cdouglas)
+
+ HADOOP-4993. Fix Chukwa agent configuration and startup to make it both
+ more modular and testable. (Ari Rabkin via cdouglas)
+
+ HADOOP-5048. Fix capacity scheduler to correctly cleanup jobs that are
+ killed after initialization, but before running.
+ (Sreekanth Ramakrishnan via yhemanth)
+
+ HADOOP-4671. Mark loop control variables shared between threads as
+ volatile. (cdouglas)
+
+ HADOOP-5079. HashFunction inadvertently destroys some randomness
+ (Jonathan Ellis via stack)
+
+ HADOOP-4999. A failure to write to FsEditsLog results in
+ IndexOutOfBounds exception. (Boris Shkolnik via rangadi)
+
+ HADOOP-5139. Catch IllegalArgumentException during metrics registration
+ in RPC. (Hairong Kuang via szetszwo)
+
+ HADOOP-5085. Copying a file to local with Crc throws an exception.
+ (hairong)
+
+ HADOOP-4759. Removes temporary output directory for failed and
+ killed tasks by launching special CLEANUP tasks for the same.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-5211. Fix check for job completion in TestSetupAndCleanupFailure.
+ (enis)
+
+ HADOOP-5254. The Configuration class should be able to work with XML
+ parsers that do not support xmlinclude. (Steve Loughran via dhruba)
+
+ HADOOP-4692. Namenode in infinite loop for replicating/deleting corrupt
+ blocks. (hairong)
+
+ HADOOP-5255. Fix use of Math.abs to avoid overflow. (Jonathan Ellis via
+ cdouglas)
+
+ HADOOP-5269. Fixes a problem to do with tasktracker holding on to
+ FAILED_UNCLEAN or KILLED_UNCLEAN tasks forever. (Amareshwari Sriramadasu
+ via ddas)
+
+ HADOOP-5214. Fixes a ConcurrentModificationException while the Fairshare
+ Scheduler accesses the tasktrackers stored by the JobTracker.
+ (Rahul Kumar Singh via yhemanth)
+
+ HADOOP-5233. Addresses the three issues - Race condition in updating
+ status, NPE in TaskTracker task localization when the conf file is missing
+ (HADOOP-5234) and NPE in handling KillTaskAction of a cleanup task
+ (HADOOP-5235). (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-5247. Introduces a broadcast of KillJobAction to all trackers when
+ a job finishes. This fixes a bunch of problems to do with NPE when a
+ completed job is not in memory and a tasktracker comes to the jobtracker
+ with a status report of a task belonging to that job. (Amar Kamat via ddas)
+
+ HADOOP-5282. Fixed job history logs for task attempts that are
+ failed by the JobTracker, say due to lost task trackers. (Amar
+ Kamat via yhemanth)
+
+ HADOOP-4963. Fixes a logging to do with getting the location of
+ map output file. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-5292. Fix NPE in KFS::getBlockLocations. (Sriram Rao via lohit)
+
+ HADOOP-5241. Fixes a bug in disk-space resource estimation. Makes
+ the estimation formula linear where blowUp =
+ Total-Output/Total-Input. (Sharad Agarwal via ddas)
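+
+ As a worked example of the linear formula (numbers invented for
+ illustration): if a job has so far written 150 MB of output from 100 MB
+ of input, blowUp = 150/100 = 1.5, so a task about to read 40 MB of input
+ is estimated to need roughly 1.5 * 40 = 60 MB of output space.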
+
+ HADOOP-5142. Fix MapWritable#putAll to store key/value classes.
+ (Doğacan Güney via enis)
+
+ HADOOP-4744. Workaround for jetty6 returning -1 when getLocalPort
+ is invoked on the connector. The workaround patch retries a few
+ times before failing. (Jothi Padmanabhan via yhemanth)
+
+ HADOOP-5280. Adds a check to prevent a task state transition from
+ FAILED to any of UNASSIGNED, RUNNING, COMMIT_PENDING or
+ SUCCEEDED. (ddas)
+
+ HADOOP-5272. Fixes a problem to do with detecting whether an
+ attempt is the first attempt of a Task. This affects JobTracker
+ restart. (Amar Kamat via ddas)
+
+ HADOOP-5306. Fixes a problem to do with logging/parsing the http port of a
+ lost tracker. Affects JobTracker restart. (Amar Kamat via ddas)
+
+ HADOOP-5111. Fix Job::set* methods to work with generics. (cdouglas)
+
+ HADOOP-5274. Fix gridmix2 dependency on wordcount example. (cdouglas)
+
+ HADOOP-5145. Balancer sometimes runs out of memory after running
+ days or weeks. (hairong)
+
+ HADOOP-5338. Fix jobtracker restart to clear task completion
+ events cached by tasktrackers forcing them to fetch all events
+ afresh, thus avoiding missed task completion events on the
+ tasktrackers. (Amar Kamat via yhemanth)
+
+ HADOOP-4695. Change TestGlobalFilter so that it allows a web page to be
+ filtered more than once for a single access. (Kan Zhang via szetszwo)
+
+ HADOOP-5298. Change TestServletFilter so that it allows a web page to be
+ filtered more than once for a single access. (szetszwo)
+
+ HADOOP-5432. Disable ssl during unit tests in hdfsproxy, as it is unused
+ and causes failures. (cdouglas)
+
+ HADOOP-5416. Correct the shell command "fs -test" forrest doc description.
+ (Ravi Phulari via szetszwo)
+
+ HADOOP-5327. Fixed job tracker to remove files from system directory on
+ ACL check failures and also check ACLs on restart.
+ (Amar Kamat via yhemanth)
+
+ HADOOP-5395. Change the exception message when a job is submitted to an
+ invalid queue. (Rahul Kumar Singh via yhemanth)
+
+ HADOOP-5276. Fixes a problem to do with updating the start time of
+ a task when the tracker that ran the task is lost. (Amar Kamat via
+ ddas)
+
+ HADOOP-5278. Fixes a problem to do with logging the finish time of
+ a task during recovery (after a JobTracker restart). (Amar Kamat
+ via ddas)
+
+ HADOOP-5490. Fixes a synchronization problem in the
+ EagerTaskInitializationListener class. (Jothi Padmanabhan via
+ ddas)
+
+ HADOOP-5493. The shuffle copier threads return the codecs back to
+ the pool when the shuffle completes. (Jothi Padmanabhan via ddas)
+
+ HADOOP-5505. Fix JspHelper initialization in the context of
+ MiniDFSCluster. (Raghu Angadi)
+
+ HADOOP-5414. Fixes IO exception while executing hadoop fs -touchz
+ fileName by making sure that lease renewal thread exits before dfs
+ client exits. (hairong)
+
+ HADOOP-5103. FileInputFormat now reuses the clusterMap network
+ topology object and that brings down the log messages in the
+ JobClient to do with NetworkTopology.add significantly. (Jothi
+ Padmanabhan via ddas)
+
+ HADOOP-5483. Fixes a problem in the Directory Cleanup Thread due to which
+ TestMiniMRWithDFS sometimes used to fail. (ddas)
+
+ HADOOP-5281. Prevent sharing incompatible ZlibCompressor instances between
+ GzipCodec and DefaultCodec. (cdouglas)
+
+ HADOOP-5463. Balancer throws "Not a host:port pair" unless port is
+ specified in fs.default.name. (Stuart White via hairong)
+
+ HADOOP-5514. Fix JobTracker metrics and add metrics for waiting and
+ failed tasks. (cdouglas)
+
+ HADOOP-5516. Fix NullPointerException in TaskMemoryManagerThread
+ that comes when monitored processes disappear when the thread is
+ running. (Vinod Kumar Vavilapalli via yhemanth)
+
+ HADOOP-5382. Support combiners in the new context object API. (omalley)
+
+ HADOOP-5471. Fixes a problem to do with updating the log.index file in the
+ case where a cleanup task is run. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-5534. Fixed a deadlock in Fair scheduler's servlet.
+ (Rahul Kumar Singh via yhemanth)
+
+ HADOOP-5328. Fixes a problem in the renaming of job history files during
+ job recovery. (Amar Kamat via ddas)
+
+ HADOOP-5417. Don't ignore InterruptedExceptions that happen when calling
+ into rpc. (omalley)
+
+ HADOOP-5320. Add a close() in TestMapReduceLocal. (Jothi Padmanabhan
+ via szetszwo)
+
+ HADOOP-5520. Fix a typo in disk quota help message. (Ravi Phulari
+ via szetszwo)
+
+ HADOOP-5519. Remove claims from mapred-default.xml that prime numbers
+ of tasks are helpful. (Owen O'Malley via szetszwo)
+
+ HADOOP-5484. TestRecoveryManager fails with FileAlreadyExistsException.
+ (Amar Kamat via hairong)
+
+ HADOOP-5564. Limit the JVM heap size in the java command for initializing
+ JAVA_PLATFORM. (Suresh Srinivas via szetszwo)
+
+ HADOOP-5565. Add API for failing/finalized jobs to the JT metrics
+ instrumentation. (Jerome Boulon via cdouglas)
+
+ HADOOP-5390. Remove duplicate jars from tarball, src from binary tarball
+ added by hdfsproxy. (Zhiyong Zhang via cdouglas)
+
+ HADOOP-5066. Building binary tarball should not build docs/javadocs, copy
+ src, or run jdiff. (Giridharan Kesavan via cdouglas)
+
+ HADOOP-5459. Fix undetected CRC errors where intermediate output is closed
+ before it has been completely consumed. (cdouglas)
+
+ HADOOP-5571. Remove widening primitive conversion in TupleWritable mask
+ manipulation. (Jingkei Ly via cdouglas)
+
+ HADOOP-5588. Remove an unnecessary call to listStatus(..) in
+ FileSystem.globStatusInternal(..). (Hairong Kuang via szetszwo)
+
+ HADOOP-5473. Solves a race condition in killing a task - the state is KILLED
+ if there is a user request pending to kill the task and the TT reported
+ the state as SUCCESS. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-5576. Fix LocalRunner to work with the new context object API in
+ mapreduce. (Tom White via omalley)
+
+ HADOOP-4374. Installs a shutdown hook in the Task JVM so that log.index is
+ updated before the JVM exits. Also makes the update to log.index atomic.
+ (Ravi Gummadi via ddas)
+
+ HADOOP-5577. Add a verbose flag to mapreduce.Job.waitForCompletion to get
+ the running job's information printed to the user's stdout as it runs.
+ (omalley)
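+
+ As an illustration (not part of the entry itself), a minimal sketch of
+ the flag in the new API; conf is a hypothetical Configuration and the
+ rest of the job setup is elided:
+
+   // 'true' asks waitForCompletion to print the running job's progress
+   // to stdout; the return value says whether the job succeeded.
+   org.apache.hadoop.mapreduce.Job job =
+       new org.apache.hadoop.mapreduce.Job(conf, "example-job");
+   boolean ok = job.waitForCompletion(true);
+   System.exit(ok ? 0 : 1);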
+
+ HADOOP-5607. Fix NPE in TestCapacityScheduler. (cdouglas)
+
+ HADOOP-5605. All the replicas incorrectly got marked as corrupt. (hairong)
+
+ HADOOP-5337. JobTracker, upon restart, now waits for the TaskTrackers to
+ join back before scheduling new tasks. This fixes race conditions associated
+ with greedy scheduling as was the case earlier. (Amar Kamat via ddas)
+
+ HADOOP-5227. Fix distcp so -update and -delete can be meaningfully
+ combined. (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-5305. Increase number of files and print debug messages in
+ TestCopyFiles. (szetszwo)
+
+ HADOOP-5548. Add synchronization for JobTracker methods in RecoveryManager.
+ (Amareshwari Sriramadasu via sharad)
+
+ HADOOP-3810. NameNode seems unstable on a cluster with little space left.
+ (hairong)
+
+ HADOOP-5068. Fix NPE in TestCapacityScheduler. (Vinod Kumar Vavilapalli
+ via szetszwo)
+
+ HADOOP-5585. Clear FileSystem statistics between tasks when jvm-reuse
+ is enabled. (omalley)
+
+ HADOOP-5394. JobTracker might schedule 2 attempts of the same task
+ with the same attempt id across restarts. (Amar Kamat via sharad)
+
+ HADOOP-5645. After HADOOP-4920 we need a place to checkin
+ releasenotes.html. (nigel)
+
+Release 0.19.2 - Unreleased
+
+ BUG FIXES
+
+ HADOOP-5154. Fixes a deadlock in the fairshare scheduler.
+ (Matei Zaharia via yhemanth)
+
+ HADOOP-5146. Fixes a race condition that causes LocalDirAllocator to miss
+ files. (Devaraj Das via yhemanth)
+
+ HADOOP-4638. Fixes job recovery to not crash the job tracker for problems
+ with a single job file. (Amar Kamat via yhemanth)
+
+ HADOOP-5384. Fix a problem that DataNodeCluster creates blocks with
+ generationStamp == 1. (szetszwo)
+
+ HADOOP-5376. Fixes the code handling lost tasktrackers to set the task state
+ to KILLED_UNCLEAN only for relevant type of tasks.
+ (Amareshwari Sriramadasu via yhemanth)
+
+ HADOOP-5285. Fixes the issues - (1) obtainTaskCleanupTask checks whether job is
+ inited before trying to lock the JobInProgress (2) Moves the CleanupQueue class
+ outside the TaskTracker and makes it a generic class that is used by the
+ JobTracker also for deleting the paths on the job's output fs. (3) Moves the
+ references to completedJobStore outside the block where the JobTracker is locked.
+ (ddas)
+
+ HADOOP-5392. Fixes a problem to do with JT crashing during recovery when
+ the job files are garbled. (Amar Kamat via ddas)
+
+ HADOOP-5332. Appending to files is not allowed (by default) unless
+ dfs.support.append is set to true. (dhruba)
+
+ HADOOP-5333. libhdfs supports appending to files. (dhruba)
+
+ HADOOP-3998. Fix dfsclient exception when JVM is shutdown. (dhruba)
+
+ HADOOP-5440. Fixes a problem to do with removing a taskId from the list
+ of taskIds that the TaskTracker's TaskMemoryManager manages.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-5446. Restore TaskTracker metrics. (cdouglas)
+
+ HADOOP-5449. Fixes the history cleaner thread.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-5479. NameNode should not send empty block replication request to
+ DataNode. (hairong)
+
+ HADOOP-5259. Job with output hdfs:/user/<username>/outputpath (no
+ authority) fails with Wrong FS. (Doug Cutting via hairong)
+
+ HADOOP-5522. Documents the setup/cleanup tasks in the mapred tutorial.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-5549. ReplicationMonitor should schedule both replication and
+ deletion work in one iteration. (hairong)
+
+ HADOOP-5554. DataNodeCluster and CreateEditsLog should create blocks with
+ the same generation stamp value. (hairong via szetszwo)
+
+ HADOOP-5231. Clones the TaskStatus before passing it to the JobInProgress.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4719. Fix documentation of 'ls' format for FsShell. (Ravi Phulari
+ via cdouglas)
+
+ HADOOP-5374. Fixes a NPE problem in getTasksToSave method.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4780. Cache the size of directories in DistributedCache, avoiding
+ long delays in recalculating it. (He Yongqiang via cdouglas)
+
+ HADOOP-5551. Prevent directory destruction on file create.
+ (Brian Bockelman via shv)
+
+Release 0.19.1 - 2009-02-23
+
+ IMPROVEMENTS
+
+ HADOOP-4739. Fix spelling and grammar, improve phrasing of some sections in
+ mapred tutorial. (Vivek Ratan via cdouglas)
+
+ HADOOP-3894. DFSClient logging improvements. (Steve Loughran via shv)
+
+ HADOOP-5126. Remove empty file BlocksWithLocations.java (shv)
+
+ HADOOP-5127. Remove public methods in FSDirectory. (Jakob Homan via shv)
+
+ BUG FIXES
+
+ HADOOP-4697. Fix getBlockLocations in KosmosFileSystem to handle multiple
+ blocks correctly. (Sriram Rao via cdouglas)
+
+ HADOOP-4420. Add null checks for job, caused by invalid job IDs.
+ (Aaron Kimball via tomwhite)
+
+ HADOOP-4632. Fix TestJobHistoryVersion to use test.build.dir instead of the
+ current working directory for scratch space. (Amar Kamat via cdouglas)
+
+ HADOOP-4508. Fix FSDataOutputStream.getPos() for append. (dhruba via
+ szetszwo)
+
+ HADOOP-4727. Fix a group checking bug in fill_stat_structure(...) in
+ fuse-dfs. (Brian Bockelman via szetszwo)
+
+ HADOOP-4836. Correct typos in mapred related documentation. (Jordà Polo
+ via szetszwo)
+
+ HADOOP-4821. Usage description in the Quotas guide documentations are
+ incorrect. (Boris Shkolnik via hairong)
+
+ HADOOP-4847. Moves the loading of OutputCommitter to the Task.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4966. Marks completed setup tasks for removal.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4982. TestFsck should run in Eclipse. (shv)
+
+ HADOOP-5008. TestReplication#testPendingReplicationRetry leaves an opened
+ fd unclosed. (hairong)
+
+ HADOOP-4906. Fix TaskTracker OOM by keeping a shallow copy of JobConf in
+ TaskTracker.TaskInProgress. (Sharad Agarwal via acmurthy)
+
+ HADOOP-4918. Fix bzip2 compression to work with Sequence Files.
+ (Zheng Shao via dhruba).
+
+ HADOOP-4965. TestFileAppend3 should close FileSystem. (shv)
+
+ HADOOP-4967. Fixes a race condition in the JvmManager to do with killing
+ tasks. (ddas)
+
+ HADOOP-5009. DataNode#shutdown sometimes leaves data block scanner
+ verification log unclosed. (hairong)
+
+ HADOOP-5086. Use the appropriate FileSystem for trash URIs. (cdouglas)
+
+ HADOOP-4955. Make DBOutputFormat use column names from setOutput().
+ (Kevin Peterson via enis)
+
+ HADOOP-4862. Minor : HADOOP-3678 did not remove all the cases of
+ spurious IOExceptions logged by DataNode. (Raghu Angadi)
+
+ HADOOP-5034. NameNode should send both replication and deletion requests
+ to DataNode in one reply to a heartbeat. (hairong)
+
+ HADOOP-5156. TestHeartbeatHandling uses MiniDFSCluster.getNamesystem()
+ which does not exist in branches 0.19 and 0.20. (hairong)
+
+ HADOOP-5161. Accepted sockets do not get placed in
+ DataXceiverServer#childSockets. (hairong)
+
+ HADOOP-5193. Correct calculation of edits modification time. (shv)
+
+ HADOOP-4494. Allow libhdfs to append to files.
+ (Pete Wyckoff via dhruba)
+
+ HADOOP-5166. Fix JobTracker restart to work when ACLs are configured
+ for the JobTracker. (Amar Kamat via yhemanth).
+
+ HADOOP-5067. Fixes TaskInProgress.java to keep track of count of failed and
+ killed tasks correctly. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4760. HDFS streams should not throw exceptions when closed twice.
+ (enis)
+
+Release 0.19.0 - 2008-11-18
+
+ INCOMPATIBLE CHANGES
+
+ HADOOP-3595. Remove deprecated methods for mapred.combine.once
+ functionality, which was necessary to provide backwards-compatible
+ combiner semantics for 0.18. (cdouglas via omalley)
+
+ HADOOP-3667. Remove the following deprecated methods from JobConf:
+ addInputPath(Path)
+ getInputPaths()
+ getMapOutputCompressionType()
+ getOutputPath()
+ getSystemDir()
+ setInputPath(Path)
+ setMapOutputCompressionType(CompressionType style)
+ setOutputPath(Path)
+ (Amareshwari Sriramadasu via omalley)
+
+ HADOOP-3652. Remove deprecated class OutputFormatBase.
+ (Amareshwari Sriramadasu via cdouglas)
+
+ HADOOP-2885. Break the hadoop.dfs package into separate packages under
+ hadoop.hdfs that reflect whether they are client, server, protocol,
+ etc. DistributedFileSystem and DFSClient have moved and are now
+ considered package private. (Sanjay Radia via omalley)
+
+ HADOOP-2325. Require Java 6. (cutting)
+
+ HADOOP-372. Add support for multiple input paths with a different
+ InputFormat and Mapper for each path. (Chris Smith via tomwhite)
+
+ HADOOP-1700. Support appending to file in HDFS. (dhruba)
+
+ HADOOP-3792. Make FsShell -test consistent with unix semantics, returning
+ zero for true and non-zero for false. (Ben Slusky via cdouglas)
+
+ HADOOP-3664. Remove the deprecated method InputFormat.validateInput,
+ which is no longer needed. (tomwhite via omalley)
+
+ HADOOP-3549. Give more meaningful errno's in libhdfs. In particular,
+ EACCES is returned for permission problems. (Ben Slusky via omalley)
+
+ HADOOP-4036. ResourceStatus was added to TaskTrackerStatus by HADOOP-3759,
+ so increment the InterTrackerProtocol version. (Hemanth Yamijala via
+ omalley)
+
+ HADOOP-3150. Moves task promotion to tasks. Defines a new interface for
+ committing output files. Moves job setup to jobclient, and moves jobcleanup
+ to a separate task. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3446. Keep map outputs in memory during the reduce. Remove
+ fs.inmemory.size.mb and replace with properties defining in memory map
+ output retention during the shuffle and reduce relative to maximum heap
+ usage. (cdouglas)
+
+ HADOOP-3245. Adds the feature for supporting JobTracker restart. Running
+ jobs can be recovered from the history file. The history file format has
+ been modified to support recovery. The task attempt ID now has the
+ JobTracker start time to distinguish attempts of the same TIP across
+ restarts. (Amar Ramesh Kamat via ddas)
+
+ HADOOP-4007. Remove DFSFileInfo; FileStatus is sufficient.
+ (Sanjay Radia via hairong)
+
+ HADOOP-3722. Fixed Hadoop Streaming and Hadoop Pipes to use the Tool
+ interface and GenericOptionsParser. (Enis Soztutar via acmurthy)
+
+ HADOOP-2816. Cluster summary at name node web reports the space
+ utilization as:
+ Configured Capacity: capacity of all the data directories - Reserved space
+ Present Capacity: Space available for DFS, i.e. remaining + used space
+ DFS Used%: DFS used space/Present Capacity
+ (Suresh Srinivas via hairong)
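+
+ A made-up worked example of these definitions: with 100 GB of raw data
+ directories and 10 GB reserved, Configured Capacity = 100 - 10 = 90 GB;
+ if DFS has used 30 GB and 40 GB remains available, Present Capacity =
+ 30 + 40 = 70 GB and DFS Used% = 30/70, about 43%.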
+
+ HADOOP-3938. Disk space quotas for HDFS. This is similar to namespace
+ quotas in 0.18. (rangadi)
+
+ HADOOP-4293. Make Configuration Writable and remove unreleased
+ WritableJobConf. Configuration.write is renamed to writeXml. (omalley)
+
+ HADOOP-4281. Change dfsadmin to report available disk space in a format
+ consistent with the web interface as defined in HADOOP-2816. (Suresh
+ Srinivas via cdouglas)
+
+ HADOOP-4430. Further change the cluster summary at name node web that was
+ changed in HADOOP-2816:
+ Non DFS Used - This indicates the disk space taken by non-DFS files out
+ of the Configured Capacity
+ DFS Used % - DFS Used % of Configured Capacity
+ DFS Remaining % - Remaining % of Configured Capacity available for DFS use
+ DFS command line report reflects the same change. Config parameter
+ dfs.datanode.du.pct is no longer used and is removed from the
+ hadoop-default.xml. (Suresh Srinivas via hairong)
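+
+ Continuing the made-up example above, and assuming Non DFS Used is
+ computed as Configured Capacity - DFS Used - DFS Remaining: 90 - 30 -
+ 40 = 20 GB, with DFS Used % = 30/90 (about 33%) and DFS Remaining % =
+ 40/90 (about 44%).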
+
+ HADOOP-4116. Balancer should provide better resource management. (hairong)
+
+ HADOOP-4599. BlocksMap and BlockInfo made package private. (shv)
+
+ NEW FEATURES
+
+ HADOOP-3341. Allow streaming jobs to specify the field separator for map
+ and reduce input and output. The new configuration values are:
+ stream.map.input.field.separator
+ stream.map.output.field.separator
+ stream.reduce.input.field.separator
+ stream.reduce.output.field.separator
+ All of them default to "\t". (Zheng Shao via omalley)
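+
+ A minimal sketch of overriding two of these keys from Java (conf is a
+ hypothetical JobConf; unset keys keep the "\t" default):
+
+   // Separate map output key and value with a comma instead of a tab.
+   conf.set("stream.map.output.field.separator", ",");
+   // Split reduce input lines on a pipe character.
+   conf.set("stream.reduce.input.field.separator", "|");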
+
+ HADOOP-3479. Defines the configuration file for the resource manager in
+ Hadoop. You can configure various parameters related to scheduling, such
+ as queues and queue properties here. The properties for a queue follow a
+ naming convention, such as hadoop.rm.queue.queue-name.property-name.
+ (Hemanth Yamijala via ddas)
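+
+ For example, under this convention a property for a hypothetical queue
+ named "research" would be spelled hadoop.rm.queue.research.property-name,
+ with property-name replaced by the parameter being configured.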
+
+ HADOOP-3149. Adds a way in which map/reduce tasks can create multiple
+ outputs. (Alejandro Abdelnur via ddas)
+
+ HADOOP-3714. Add a new contrib, bash-tab-completion, which enables
+ bash tab completion for the bin/hadoop script. See the README file
+ in the contrib directory for the installation. (Chris Smith via enis)
+
+ HADOOP-3730. Adds a new JobConf constructor that disables loading
+ default configurations. (Alejandro Abdelnur via ddas)
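+
+ A minimal sketch of the new constructor (the boolean is the
+ load-defaults switch this entry adds):
+
+   // Skip hadoop-default.xml/hadoop-site.xml; only explicitly set
+   // values will be visible in this configuration.
+   org.apache.hadoop.mapred.JobConf conf =
+       new org.apache.hadoop.mapred.JobConf(false);
+   conf.set("mapred.job.name", "defaults-free-job");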
+
+ HADOOP-3772. Add a new Hadoop Instrumentation api for the JobTracker and
+ the TaskTracker, refactor Hadoop Metrics as an implementation of the api.
+ (Ari Rabkin via acmurthy)
+
+ HADOOP-2302. Provides a comparator for numerical sorting of key fields.
+ (ddas)
+
+ HADOOP-153. Provides a way to skip bad records. (Sharad Agarwal via ddas)
+
+ HADOOP-657. Free disk space should be modelled and used by the scheduler
+ to make scheduling decisions. (Ari Rabkin via omalley)
+
+ HADOOP-3719. Initial checkin of Chukwa, which is a data collection and
+ analysis framework. (Jerome Boulon, Andy Konwinski, Ari Rabkin,
+ and Eric Yang)
+
+ HADOOP-3873. Add -filelimit and -sizelimit options to distcp to cap the
+ number of files/bytes copied in a particular run to support incremental
+ updates and mirroring. (TszWo (Nicholas), SZE via cdouglas)
+
+ HADOOP-3585. FailMon package for hardware failure monitoring and
+ analysis of anomalies. (Ioannis Koltsidas via dhruba)
+
+ HADOOP-1480. Add counters to the C++ Pipes API. (acmurthy via omalley)
+
+ HADOOP-3854. Add support for pluggable servlet filters in the HttpServers.
+ (Tsz Wo (Nicholas) Sze via omalley)
+
+ HADOOP-3759. Provides ability to run memory intensive jobs without
+ affecting other running tasks on the nodes. (Hemanth Yamijala via ddas)
+
+ HADOOP-3746. Add a fair share scheduler. (Matei Zaharia via omalley)
+
+ HADOOP-3754. Add a thrift interface to access HDFS. (dhruba via omalley)
+
+ HADOOP-3828. Provides a way to write skipped records to DFS.
+ (Sharad Agarwal via ddas)
+
+ HADOOP-3948. Separate name-node edits and fsimage directories.
+ (Lohit Vijayarenu via shv)
+
+ HADOOP-3939. Add an option to DistCp to delete files at the destination
+ not present at the source. (Tsz Wo (Nicholas) Sze via cdouglas)
+
+ HADOOP-3601. Add a new contrib module for Hive, which is a sql-like
+ query processing tool that uses map/reduce. (Ashish Thusoo via omalley)
+
+ HADOOP-3866. Added sort and multi-job updates in the JobTracker web ui.
+ (Craig Weisenfluh via omalley)
+
+ HADOOP-3698. Add access control to control who is allowed to submit or
+ modify jobs in the JobTracker. (Hemanth Yamijala via omalley)
+
+ HADOOP-1869. Support access times for HDFS files. (dhruba)
+
+ HADOOP-3941. Extend FileSystem API to return file-checksums.
+ (szetszwo)
+
+ HADOOP-3581. Prevents memory intensive user tasks from taking down
+ nodes. (Vinod K V via ddas)
+
+ HADOOP-3970. Provides a way to recover counters written to JobHistory.
+ (Amar Kamat via ddas)
+
+ HADOOP-3702. Adds ChainMapper and ChainReducer classes that allow composing
+ chains of Maps and Reduces in a single Map/Reduce job, something like
+ MAP+ / REDUCE MAP*. (Alejandro Abdelnur via ddas)
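+
+ A hedged sketch of the chaining idea; AMap, BMap and CReduce are
+ hypothetical classes, and the addMapper/setReducer signatures are
+ assumed from the org.apache.hadoop.mapred.lib API of this period:
+
+   JobConf job = new JobConf(ChainExample.class);
+   // MAP+: run AMap, then BMap, over each input record.
+   ChainMapper.addMapper(job, AMap.class, LongWritable.class, Text.class,
+       Text.class, Text.class, true, new JobConf(false));
+   ChainMapper.addMapper(job, BMap.class, Text.class, Text.class,
+       Text.class, Text.class, true, new JobConf(false));
+   // REDUCE: a single reducer closes the chain; further mappers could
+   // follow it via ChainReducer.addMapper (the MAP* part).
+   ChainReducer.setReducer(job, CReduce.class, Text.class, Text.class,
+       Text.class, Text.class, true, new JobConf(false));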
+
+ HADOOP-3445. Add capacity scheduler that provides guaranteed capacities to
+ queues as a percentage of the cluster. (Vivek Ratan via omalley)
+
+ HADOOP-3992. Add a synthetic load generation facility to the test
+ directory. (hairong via szetszwo)
+
+ HADOOP-3981. Implement a distributed file checksum algorithm in HDFS
+ and change DistCp to use file checksum for comparing src and dst files
+ (szetszwo)
+
+ HADOOP-3829. Narrow down skipped records based on a user-acceptable value.
+ (Sharad Agarwal via ddas)
+
+ HADOOP-3930. Add common interfaces for the pluggable schedulers and the
+ cli & gui clients. (Sreekanth Ramakrishnan via omalley)
+
+ HADOOP-4176. Implement getFileChecksum(Path) in HftpFileSystem. (szetszwo)
+
+ HADOOP-249. Reuse JVMs across Map-Reduce Tasks.
+ Configuration changes to hadoop-default.xml:
+ add mapred.job.reuse.jvm.num.tasks
+ (Devaraj Das via acmurthy)
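+
+ A minimal sketch of enabling reuse (the default of 1 means no reuse;
+ treating -1 as "no limit" is the convention usually documented for
+ this property):
+
+   // Let each spawned JVM run up to 10 tasks of the same job.
+   conf.setInt("mapred.job.reuse.jvm.num.tasks", 10);
+   // Or, assuming -1 means unlimited, reuse JVMs for the whole job:
+   // conf.setInt("mapred.job.reuse.jvm.num.tasks", -1);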
+
+ HADOOP-4070. Provide a mechanism in Hive for registering UDFs from the
+ query language. (tomwhite)
+
+ HADOOP-2536. Implement a JDBC based database input and output formats to
+ allow Map-Reduce applications to work with databases. (Fredrik Hedberg and
+ Enis Soztutar via acmurthy)
+
+ HADOOP-3019. A new library to support total order partitions.
+ (cdouglas via omalley)
+
+ HADOOP-3924. Added a 'KILLED' job status. (Subramaniam Krishnan via
+ acmurthy)
+
+ IMPROVEMENTS
+
+ HADOOP-4205. hive: metastore and ql to use the refactored SerDe library.
+ (zshao)
+
+ HADOOP-4106. libhdfs: add time, permission and user attribute support
+ (part 2). (Pete Wyckoff through zshao)
+
+ HADOOP-4104. libhdfs: add time, permission and user attribute support.
+ (Pete Wyckoff through zshao)
+
+ HADOOP-3908. libhdfs: better error message if libhdfs.so doesn't exist.
+ (Pete Wyckoff through zshao)
+
+ HADOOP-3732. Delay initialization of datanode block verification till
+ the verification thread is started. (rangadi)
+
+ HADOOP-1627. Various small improvements to 'dfsadmin -report' output.
+ (rangadi)
+
+ HADOOP-3577. Tools to inject blocks into name node and simulated
+ data nodes for testing. (Sanjay Radia via hairong)
+
+ HADOOP-2664. Add a lzop compatible codec, so that files compressed by lzop
+ may be processed by map/reduce. (cdouglas via omalley)
+
+ HADOOP-3655. Add additional ant properties to control junit. (Steve
+ Loughran via omalley)
+
+ HADOOP-3543. Update the copyright year to 2008. (cdouglas via omalley)
+
+ HADOOP-3587. Add a unit test for the contrib/data_join framework.
+ (cdouglas)
+
+ HADOOP-3402. Add terasort example program (omalley)
+
+ HADOOP-3660. Add replication factor for injecting blocks in simulated
+ datanodes. (Sanjay Radia via cdouglas)
+
+ HADOOP-3684. Add a cloning function to the contrib/data_join framework
+ permitting users to define a more efficient method for cloning values from
+ the reduce than serialization/deserialization. (Runping Qi via cdouglas)
+
+ HADOOP-3478. Improves the handling of map output fetching. Now the
+ randomization is by the hosts (and not the map outputs themselves).
+ (Jothi Padmanabhan via ddas)
+
+ HADOOP-3617. Removed redundant checks of accounting space in MapTask and
+ makes the spill thread persistent so as to avoid creating a new one for
+ each spill. (Chris Douglas via acmurthy)
+
+ HADOOP-3412. Factor the scheduler out of the JobTracker and make
+ it pluggable. (Tom White and Brice Arnould via omalley)
+
+ HADOOP-3756. Minor. Remove unused dfs.client.buffer.dir from
+ hadoop-default.xml. (rangadi)
+
+ HADOOP-3747. Adds counter support for MultipleOutputs.
+ (Alejandro Abdelnur via ddas)
+
+ HADOOP-3169. LeaseChecker daemon should not be started in DFSClient
+ constructor. (TszWo (Nicholas), SZE via hairong)
+
+ HADOOP-3824. Move base functionality of StatusHttpServer to a core
+ package. (TszWo (Nicholas), SZE via cdouglas)
+
+ HADOOP-3646. Add a bzip2 compatible codec, so bzip compressed data
+ may be processed by map/reduce. (Abdul Qadeer via cdouglas)
+
+ HADOOP-3861. MapFile.Reader and Writer should implement Closeable.
+ (tomwhite via omalley)
+
+ HADOOP-3791. Introduce generics into ReflectionUtils. (Chris Smith via
+ cdouglas)
+
+ HADOOP-3694. Improve unit test performance by changing
+ MiniDFSCluster to listen only on 127.0.0.1. (cutting)
+
+ HADOOP-3620. Namenode should synchronously resolve a datanode's network
+ location when the datanode registers. (hairong)
+
+ HADOOP-3860. NNThroughputBenchmark is extended with rename and delete
+ benchmarks. (shv)
+
+ HADOOP-3892. Include unix group name in JobConf. (Matei Zaharia via johan)
+
+ HADOOP-3875. Change the time period between heartbeats to be relative to
+ the end of the heartbeat rpc, rather than the start. This causes better
+ behavior if the JobTracker is overloaded. (acmurthy via omalley)
+
+ HADOOP-3853. Move multiple input format (HADOOP-372) extension to
+ library package. (tomwhite via johan)
+
+ HADOOP-9. Use roulette scheduling for temporary space when the size
+ is not known. (Ari Rabkin via omalley)
+
+ HADOOP-3202. Use recursive delete rather than FileUtil.fullyDelete.
+ (Amareshwari Sriramadasu via omalley)
+
+ HADOOP-3368. Remove common-logging.properties from conf. (Steve Loughran
+ via omalley)
+
+ HADOOP-3851. Fix spelling mistake in FSNamesystemMetrics. (Steve Loughran
+ via omalley)
+
+ HADOOP-3780. Remove asynchronous resolution of network topology in the
+ JobTracker (Amar Kamat via omalley)
+
+ HADOOP-3852. Add ShellCommandExecutor.toString method to make nicer
+ error messages. (Steve Loughran via omalley)
+
+ HADOOP-3844. Include message of local exception in RPC client failures.
+ (Steve Loughran via omalley)
+
+ HADOOP-3935. Split out inner classes from DataNode.java. (johan)
+
+ HADOOP-3905. Create generic interfaces for edit log streams. (shv)
+
+ HADOOP-3062. Add metrics to DataNode and TaskTracker to record network
+ traffic for HDFS reads/writes and MR shuffling. (cdouglas)
+
+ HADOOP-3742. Remove HDFS from public java doc and add javadoc-dev for
+ generative javadoc for developers. (Sanjay Radia via omalley)
+
+ HADOOP-3944. Improve documentation for public TupleWritable class in
+ join package. (Chris Douglas via enis)
+
+ HADOOP-2330. Preallocate HDFS transaction log to improve performance.
+ (dhruba and hairong)
+
+ HADOOP-3965. Convert DataBlockScanner into a package private class. (shv)
+
+ HADOOP-3488. Prevent hadoop-daemon from rsync'ing log files (Stefan
+ Groschupf and Craig Macdonald via omalley)
+
+ HADOOP-3342. Change the kill task actions to require http post instead of
+ get to prevent accidental crawls from triggering it. (enis via omalley)
+
+ HADOOP-3937. Limit the job name in the job history filename to 50
+ characters. (Matei Zaharia via omalley)
+
+ HADOOP-3943. Remove unnecessary synchronization in
+ NetworkTopology.pseudoSortByDistance. (hairong via omalley)
+
+ HADOOP-3498. File globbing alternation should be able to span path
+ components. (tomwhite)
+
+ HADOOP-3361. Implement renames for NativeS3FileSystem.
+ (Albert Chern via tomwhite)
+
+ HADOOP-3605. Make EC2 scripts show an error message if AWS_ACCOUNT_ID is
+ unset. (Al Hoang via tomwhite)
+
+ HADOOP-4147. Remove unused class JobWithTaskContext from class
+ JobInProgress. (Amareshwari Sriramadasu via johan)
+
+ HADOOP-4151. Add a byte-comparable interface that both Text and
+ BytesWritable implement. (cdouglas via omalley)
+
+ HADOOP-4174. Move fs image/edit log methods from ClientProtocol to
+ NamenodeProtocol. (shv via szetszwo)
+
+ HADOOP-4181. Include a .gitignore and saveVersion.sh change to support
+ developing under git. (omalley)
+
+ HADOOP-4186. Factor LineReader out of LineRecordReader. (tomwhite via
+ omalley)
+
+ HADOOP-4184. Break the module dependencies between core, hdfs, and
+ mapred. (tomwhite via omalley)
+
+ HADOOP-4075. test-patch.sh now spits out ant commands that it runs.
+ (Ramya R via nigel)
+
+ HADOOP-4117. Improve configurability of Hadoop EC2 instances.
+ (tomwhite)
+
+ HADOOP-2411. Add support for larger CPU EC2 instance types.
+ (Chris K Wensel via tomwhite)
+
+ HADOOP-4083. Changed the configuration attribute queue.name to
+ mapred.job.queue.name. (Hemanth Yamijala via acmurthy)
+
+ HADOOP-4194. Added the JobConf and JobID to job-related methods in
+ JobTrackerInstrumentation for better metrics. (Mac Yang via acmurthy)
+
+ HADOOP-3975. Change test-patch script to report the working dir
+ modifications that prevent the suite from being run. (Ramya R via cdouglas)
+
+ HADOOP-4124. Added a command-line switch to allow users to set job
+ priorities, also allow it to be manipulated via the web-ui. (Hemanth
+ Yamijala via acmurthy)
+
+ HADOOP-2165. Augmented JobHistory to include the URIs to the tasks'
+ userlogs. (Vinod Kumar Vavilapalli via acmurthy)
+
+ HADOOP-4062. Remove the synchronization on the output stream when a
+ connection is closed and also remove an undesirable exception when
+ a client is stopped while there is no pending RPC request. (hairong)
+
+ HADOOP-4227. Remove the deprecated class org.apache.hadoop.fs.ShellCommand.
+ (szetszwo)
+
+ HADOOP-4006. Clean up FSConstants and move some of the constants to
+ better places. (Sanjay Radia via rangadi)
+
+ HADOOP-4279. Trace the seeds of random sequences in append unit tests to
+ make intermittent failures reproducible. (szetszwo via cdouglas)
+
+ HADOOP-4209. Remove the change to the format of task attempt id by
+ incrementing the task attempt numbers by 1000 when the job restarts.
+ (Amar Kamat via omalley)
+
+ HADOOP-4301. Adds forrest doc for the skip bad records feature.
+ (Sharad Agarwal via ddas)
+
+ HADOOP-4354. Separate TestDatanodeDeath.testDatanodeDeath() into 4 tests.
+ (szetszwo)
+
+ HADOOP-3790. Add more unit tests for testing HDFS file append. (szetszwo)
+
+ HADOOP-4321. Include documentation for the capacity scheduler. (Hemanth
+ Yamijala via omalley)
+
+ HADOOP-4424. Change menu layout for Hadoop documentation (Boris Shkolnik
+ via cdouglas).
+
+ HADOOP-4438. Update forrest documentation to include missing FsShell
+ commands. (Suresh Srinivas via cdouglas)
+
+ HADOOP-4105. Add forrest documentation for libhdfs.
+ (Pete Wyckoff via cutting)
+
+ HADOOP-4510. Make getTaskOutputPath public. (Chris Wensel via omalley)
+
+ OPTIMIZATIONS
+
+ HADOOP-3556. Removed lock contention in MD5Hash by changing the
+ singleton MessageDigester by an instance per Thread using
+ ThreadLocal. (Iván de Prado via omalley)
+
+ HADOOP-3328. When client is writing data to DFS, only the last
+ datanode in the pipeline needs to verify the checksum. Saves around
+ 30% CPU on intermediate datanodes. (rangadi)
+
+ HADOOP-3863. Use a thread-local string encoder rather than a static one
+ that is protected by a lock. (acmurthy via omalley)
+
+ HADOOP-3864. Prevent the JobTracker from locking up when a job is being
+ initialized. (acmurthy via omalley)
+
+ HADOOP-3816. Faster directory listing in KFS. (Sriram Rao via omalley)
+
+ HADOOP-2130. Pipes submit job should have both blocking and non-blocking
+ versions. (acmurthy via omalley)
+
+ HADOOP-3769. Make the SampleMapper and SampleReducer from
+ GenericMRLoadGenerator public, so they can be used in other contexts.
+ (Lingyun Yang via omalley)
+
+ HADOOP-3514. Inline the CRCs in intermediate files as opposed to reading
+ it from a different .crc file. (Jothi Padmanabhan via ddas)
+
+ HADOOP-3638. Caches the iFile index files in memory to reduce seeks
+ (Jothi Padmanabhan via ddas)
+
+ HADOOP-4225. FSEditLog.logOpenFile() should persist accessTime
+ rather than modificationTime. (shv)
+
+ HADOOP-4380. Made several new classes (Child, JVMId,
+ JobTrackerInstrumentation, QueueManager, ResourceEstimator,
+ TaskTrackerInstrumentation, and TaskTrackerMetricsInst) in
+ org.apache.hadoop.mapred package private instead of public. (omalley)
+
+ BUG FIXES
+
+ HADOOP-3563. Refactor the distributed upgrade code so that it is
+ easier to identify datanode and namenode related code. (dhruba)
+
+ HADOOP-3640. Fix the read method in the NativeS3InputStream. (tomwhite via
+ omalley)
+
+ HADOOP-3711. Fixes the Streaming input parsing to properly find the
+ separator. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3725. Prevent TestMiniMRMapDebugScript from swallowing exceptions.
+ (Steve Loughran via cdouglas)
+
+ HADOOP-3726. Throw exceptions from TestCLI setup and teardown instead of
+ swallowing them. (Steve Loughran via cdouglas)
+
+ HADOOP-3721. Refactor CompositeRecordReader and related mapred.join classes
+ to make them clearer. (cdouglas)
+
+ HADOOP-3720. Re-read the config file when dfsadmin -refreshNodes is invoked
+ so dfs.hosts and dfs.hosts.exclude are observed. (lohit vijayarenu via
+ cdouglas)
+
+ HADOOP-3485. Allow writing to files over fuse.
+ (Pete Wyckoff via dhruba)
+
+ HADOOP-3723. The flags to the libhdfs.create call can be treated as
+ a bitmask. (Pete Wyckoff via dhruba)
+
+ HADOOP-3643. Filter out completed tasks when asking for running tasks in
+ the JobTracker web/ui. (Amar Kamat via omalley)
+
+ HADOOP-3777. Ensure that Lzo compressors/decompressors correctly handle the
+ case where native libraries aren't available. (Chris Douglas via acmurthy)
+
+ HADOOP-3728. Fix SleepJob so that it doesn't depend on temporary files,
+ this ensures we can now run more than one instance of SleepJob
+ simultaneously. (Chris Douglas via acmurthy)
+
+ HADOOP-3795. Fix saving image files on Namenode with different checkpoint
+ stamps. (Lohit Vijayarenu via mahadev)
+
+ HADOOP-3624. Improve CreateEditsLog to create a tree directory structure.
+ (Lohit Vijayarenu via mahadev)
+
+ HADOOP-3778. DFSInputStream.seek() did not retry in case of some errors.
+ (LN via rangadi)
+
+ HADOOP-3661. Files deleted through fuse-dfs are now moved to Trash,
+ matching the behaviour of the dfs shell.
+ (Pete Wyckoff via dhruba)
+
+ HADOOP-3819. Unset LANG and LC_CTYPE in saveVersion.sh to make it
+ compatible with non-English locales. (Rong-En Fan via cdouglas)
+
+ HADOOP-3848. Cache calls to getSystemDir in the TaskTracker instead of
+ calling it for each task start. (acmurthy via omalley)
+
+ HADOOP-3131. Fix reduce progress reporting for compressed intermediate
+ data. (Matei Zaharia via acmurthy)
+
+ HADOOP-3796. fuse-dfs configuration is implemented as file system
+ mount options. (Pete Wyckoff via dhruba)
+
+ HADOOP-3836. Fix TestMultipleOutputs to correctly clean up. (Alejandro
+ Abdelnur via acmurthy)
+
+ HADOOP-3805. Improve fuse-dfs write performance.
+ (Pete Wyckoff via zshao)
+
+ HADOOP-3846. Fix unit test CreateEditsLog to generate paths correctly.
+ (Lohit Vjayarenu via cdouglas)
+
+ HADOOP-3904. Fix unit tests using the old dfs package name.
+ (TszWo (Nicholas), SZE via johan)
+
+ HADOOP-3319. Fix some HOD error messages to go to stderr instead of
+ stdout. (Vinod Kumar Vavilapalli via omalley)
+
+ HADOOP-3907. Move INodeDirectoryWithQuota to its own .java file.
+ (Tsz Wo (Nicholas), SZE via hairong)
+
+ HADOOP-3919. Fix attribute name in hadoop-default for
+ mapred.jobtracker.instrumentation. (Ari Rabkin via omalley)
+
+ HADOOP-3903. Change the package name for the servlets to be hdfs instead of
+ dfs. (Tsz Wo (Nicholas) Sze via omalley)
+
+ HADOOP-3773. Change Pipes to set the default map output key and value
+ types correctly. (Koji Noguchi via omalley)
+
+ HADOOP-3952. Fix compilation error in TestDataJoin referencing dfs package.
+ (omalley)
+
+ HADOOP-3951. Fix package name for FSNamesystem logs and modify other
+ hard-coded Logs to use the class name. (cdouglas)
+
+ HADOOP-3889. Improve error reporting from HftpFileSystem, handling in
+ DistCp. (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-3946. Fix TestMapRed after hadoop-3664. (tomwhite via omalley)
+
+ HADOOP-3949. Remove duplicate jars from Chukwa. (Jerome Boulon via omalley)
+
+ HADOOP-3933. DataNode sometimes sends up to io.byte.per.checksum bytes
+ more than required to client. (Ning Li via rangadi)
+
+ HADOOP-3962. Shell command "fs -count" should support paths with different
+ file systems. (Tsz Wo (Nicholas), SZE via mahadev)
+
+ HADOOP-3957. Fix javac warnings in DistCp and TestCopyFiles. (Tsz Wo
+ (Nicholas), SZE via cdouglas)
+
+ HADOOP-3958. Fix TestMapRed to check the success of test-job. (omalley via
+ acmurthy)
+
+ HADOOP-3985. Fix TestHDFSServerPorts to use random ports. (Hairong Kuang
+ via omalley)
+
+ HADOOP-3964. Fix javadoc warnings introduced by FailMon. (dhruba)
+
+ HADOOP-3785. Fix FileSystem cache to be case-insensitive for scheme and
+ authority. (Bill de hOra via cdouglas)
+
+ HADOOP-3506. Fix a rare NPE caused by error handling in S3. (Tom White via
+ cdouglas)
+
+ HADOOP-3705. Fix mapred.join parser to accept InputFormats named with
+ underscore and static, inner classes. (cdouglas)
+
+ HADOOP-4023. Fix javadoc warnings introduced when the HDFS javadoc was
+ made private. (omalley)
+
+ HADOOP-4030. Remove lzop from the default list of codecs. (Arun Murthy via
+ cdouglas)
+
+ HADOOP-3961. Fix task disk space requirement estimates for virtual
+ input jobs. Delays limiting task placement until after 10% of the maps
+ have finished. (Ari Rabkin via omalley)
+
+ HADOOP-2168. Fix problem with C++ record reader's progress not being
+ reported to framework. (acmurthy via omalley)
+
+ HADOOP-3966. Copy findbugs generated output files to PATCH_DIR while
+ running test-patch. (Ramya R via lohit)
+
+ HADOOP-4037. Fix the eclipse plugin for versions of kfs and log4j. (nigel
+ via omalley)
+
+ HADOOP-3950. Cause the Mini MR cluster to wait for task trackers to
+ register before continuing. (enis via omalley)
+
+ HADOOP-3910. Remove unused ClusterTestDFSNamespaceLogging and
+ ClusterTestDFS. (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-3954. Disable record skipping by default. (Sharad Agarwal via
+ cdouglas)
+
+ HADOOP-4050. Fix TestFairScheduler to use absolute paths for the work
+ directory. (Matei Zaharia via omalley)
+
+ HADOOP-4069. Keep temporary test files from TestKosmosFileSystem under
+ test.build.data instead of /tmp. (lohit via omalley)
+
+ HADOOP-4078. Create test files for TestKosmosFileSystem in separate
+ directory under test.build.data. (lohit)
+
+ HADOOP-3968. Fix getFileBlockLocations calls to use FileStatus instead
+ of Path reflecting the new API. (Pete Wyckoff via lohit)
+
+ HADOOP-3963. libhdfs does not exit on its own, instead it returns error
+ to the caller and behaves as a true library. (Pete Wyckoff via dhruba)
+
+ HADOOP-4100. Removes the cleanupTask scheduling from the Scheduler
+ implementations and moves it to the JobTracker.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4097. Make hive work well with speculative execution turned on.
+ (Joydeep Sen Sarma via dhruba)
+
+ HADOOP-4113. Changes to libhdfs to not exit on its own, rather return
+ an error code to the caller. (Pete Wyckoff via dhruba)
+
+ HADOOP-4054. Remove duplicate lease removal during edit log loading.
+ (hairong)
+
+ HADOOP-4071. FSNameSystem.isReplicationInProgress should add an
+ underReplicated block to the neededReplication queue using method
+ "add" not "update". (hairong)
+
+ HADOOP-4154. Fix type warnings in WritableUtils. (szetszwo via omalley)
+
+ HADOOP-4133. Log files generated by Hive should reside in the
+ build directory. (Prasad Chakka via dhruba)
+
+ HADOOP-4094. Hive now has hive-default.xml and hive-site.xml similar
+ to core hadoop. (Prasad Chakka via dhruba)
+
+ HADOOP-4112. Handles cleanupTask in JobHistory
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3831. Very slow reading clients sometimes failed while reading.
+ (rangadi)
+
+ HADOOP-4155. Use JobTracker's start time while initializing JobHistory's
+ JobTracker Unique String. (lohit)
+
+ HADOOP-4099. Fix null pointer when using HFTP from a 0.18 server.
+ (dhruba via omalley)
+
+ HADOOP-3570. Includes user specified libjar files in the client side
+ classpath path. (Sharad Agarwal via ddas)
+
+ HADOOP-4129. Changed memory limits of TaskTracker and Tasks to be in
+ KiloBytes rather than bytes. (Vinod Kumar Vavilapalli via acmurthy)
+
+ HADOOP-4139. Optimize Hive multi group-by.
+ (Namin Jain via dhruba)
+
+ HADOOP-3911. Add a check to fsck options to make sure -files is not
+ the first option to resolve conflicts with GenericOptionsParser
+ (lohit)
+
+ HADOOP-3623. Refactor LeaseManager. (szetszwo)
+
+ HADOOP-4125. Handles Reduce cleanup tip on the web ui.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4087. Hive Metastore API for php and python clients.
+ (Prasad Chakka via dhruba)
+
+ HADOOP-4197. Update DATA_TRANSFER_VERSION for HADOOP-3981. (szetszwo)
+
+ HADOOP-4138. Refactor the Hive SerDe library to better structure
+ the interfaces to the serializer and de-serializer.
+ (Zheng Shao via dhruba)
+
+ HADOOP-4195. Close compressor before returning to codec pool.
+ (acmurthy via omalley)
+
+ HADOOP-2403. Escapes some special characters before logging to
+ history files. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4200. Fix a bug in the test-patch.sh script.
+ (Ramya R via nigel)
+
+ HADOOP-4084. Add explain plan capabilities to Hive Query Language.
+ (Ashish Thusoo via dhruba)
+
+ HADOOP-4121. Preserve cause for exception if the initialization of
+ HistoryViewer for JobHistory fails. (Amareshwari Sri Ramadasu via
+ acmurthy)
+
+ HADOOP-4213. Fixes NPE in TestLimitTasksPerJobTaskScheduler.
+ (Sreekanth Ramakrishnan via ddas)
+
+ HADOOP-4077. Setting access and modification time for a file
+ requires write permissions on the file. (dhruba)
+
+ HADOOP-3592. Fix a couple of possible file leaks in FileUtil
+ (Bill de hOra via rangadi)
+
+ HADOOP-4120. Hive interactive shell records the time taken by a
+ query. (Raghotham Murthy via dhruba)
+
+ HADOOP-4090. The hive scripts pick up hadoop from HADOOP_HOME
+ and then the path. (Raghotham Murthy via dhruba)
+
+ HADOOP-4242. Remove extra ";" in FSDirectory that blocks compilation
+ in some IDE's. (szetszwo via omalley)
+
+ HADOOP-4249. Fix eclipse path to include the hsqldb.jar. (szetszwo via
+ omalley)
+
+ HADOOP-4247. Move InputSampler into org.apache.hadoop.mapred.lib, so that
+ examples.jar doesn't depend on tools.jar. (omalley)
+
+ HADOOP-4269. Fix the deprecation of LineReader by extending the new class
+ into the old name and deprecating it. Also update the tests to test the
+ new class. (cdouglas via omalley)
+
+ HADOOP-4280. Fix conversions between seconds in C and milliseconds in
+ Java for access times for files. (Pete Wyckoff via rangadi)
+
+ HADOOP-4254. -setSpaceQuota command does not convert "TB" extension to
+ terabytes properly. Implementation now uses StringUtils for parsing this.
+ (Raghu Angadi)
+
+ HADOOP-4259. Findbugs should run over tools.jar also. (cdouglas via
+ omalley)
+
+ HADOOP-4275. Move public method isJobValidName from JobID to a private
+ method in JobTracker. (omalley)
+
+ HADOOP-4173. fix failures in TestProcfsBasedProcessTree and
+ TestTaskTrackerMemoryManager tests. ProcfsBasedProcessTree and
+ memory management in TaskTracker are disabled on Windows.
+ (Vinod K V via rangadi)
+
+ HADOOP-4189. Fixes the history blocksize & intertracker protocol version
+ issues introduced as part of HADOOP-3245. (Amar Kamat via ddas)
+
+ HADOOP-4190. Fixes the backward compatibility issue with Job History.
+ introduced by HADOOP-3245 and HADOOP-2403. (Amar Kamat via ddas)
+
+ HADOOP-4237. Fixes the TestStreamingBadRecords.testNarrowDown testcase.
+ (Sharad Agarwal via ddas)
+
+ HADOOP-4274. Capacity scheduler accidentally modifies the underlying
+ data structures when browsing the job lists. (Hemanth Yamijala via omalley)
+
+ HADOOP-4309. Fix eclipse-plugin compilation. (cdouglas)
+
+ HADOOP-4232. Fix race condition in JVM reuse when multiple slots become
+ free. (ddas via acmurthy)
+
+ HADOOP-4302. Fix a race condition in TestReduceFetch that can yield false
+ negatives. (cdouglas)
+
+ HADOOP-3942. Update distcp documentation to include features introduced in
+ HADOOP-3873, HADOOP-3939. (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-4319. fuse-dfs dfs_read function returns as many bytes as it is
+ told to read unless end-of-file is reached. (Pete Wyckoff via dhruba)
+
+ HADOOP-4246. Ensure we have the correct lower bound on the number of
+ retries for fetching map-outputs; also fixed the case where the reducer
+ automatically killed itself when too many unique map-outputs could not
+ be fetched for small jobs. (Amareshwari Sri Ramadasu via acmurthy)
+
+ HADOOP-4163. Report FSErrors from map output fetch threads instead of
+ merely logging them. (Sharad Agarwal via cdouglas)
+
+ HADOOP-4261. Adds a setup task for jobs. This is required so that we
+ don't setup jobs that haven't been inited yet (since init could lead
+ to job failure). Only after the init has successfully happened do we
+ launch the setupJob task. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4256. Removes Completed and Failed Job tables from
+ jobqueue_details.jsp. (Sreekanth Ramakrishnan via ddas)
+
+ HADOOP-4267. Occasional exceptions during shutting down HSQLDB is logged
+ but not rethrown. (enis)
+
+ HADOOP-4018. The number of tasks for a single job cannot exceed a
+ pre-configured maximum value. (dhruba)
+
+ HADOOP-4288. Fixes a NPE problem in CapacityScheduler.
+ (Amar Kamat via ddas)
+
+ HADOOP-4014. Create hard links with 'fsutil hardlink' on Windows. (shv)
+
+ HADOOP-4393. Merged org.apache.hadoop.fs.permission.AccessControlException
+ and org.apache.hadoop.security.AccessControlIOException into a single
+ class hadoop.security.AccessControlException. (omalley via acmurthy)
+
+ HADOOP-4287. Fixes an issue to do with maintaining counts of running/pending
+ maps/reduces. (Sreekanth Ramakrishnan via ddas)
+
+ HADOOP-4361. Makes sure that jobs killed from command line are killed
+ fast (i.e., there is a slot to run the cleanup task soon).
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4400. Add "hdfs://" to fs.default.name on quickstart.html.
+ (Jeff Hammerbacher via omalley)
+
+ HADOOP-4378. Fix TestJobQueueInformation to use SleepJob rather than
+ WordCount via TestMiniMRWithDFS. (Sreekanth Ramakrishnan via acmurthy)
+
+ HADOOP-4376. Fix formatting in hadoop-default.xml for
+ hadoop.http.filter.initializers. (Enis Soztutar via acmurthy)
+
+ HADOOP-4410. Adds an extra arg to the API FileUtil.makeShellPath to
+ determine whether to canonicalize file paths or not.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4236. Ensure un-initialized jobs are killed correctly on
+ user-demand. (Sharad Agarwal via acmurthy)
+
+ HADOOP-4373. Fix calculation of Guaranteed Capacity for the
+ capacity-scheduler. (Hemanth Yamijala via acmurthy)
+
+ HADOOP-4053. Schedulers must be notified when jobs complete. (Amar Kamat via omalley)
+
+ HADOOP-4335. Fix FsShell -ls for filesystems without owners/groups. (David
+ Phillips via cdouglas)
+
+ HADOOP-4426. TestCapacityScheduler broke due to the two commits HADOOP-4053
+ and HADOOP-4373. This patch fixes that. (Hemanth Yamijala via ddas)
+
+ HADOOP-4418. Updates documentation in forrest for Mapred, streaming and pipes.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3155. Ensure that there is only one thread fetching
+ TaskCompletionEvents on TaskTracker re-init. (Dhruba Borthakur via
+ acmurthy)
+
+ HADOOP-4425. Fix EditLogInputStream to overload the bulk read method.
+ (cdouglas)
+
+ HADOOP-4427. Adds the new queue/job commands to the manual.
+ (Sreekanth Ramakrishnan via ddas)
+
+ HADOOP-4278. Increase debug logging for unit test TestDatanodeDeath.
+ Fix the case when primary is dead. (dhruba via szetszwo)
+
+ HADOOP-4423. Keep block length when the block recovery is triggered by
+ append. (szetszwo)
+
+ HADOOP-4449. Fix dfsadmin usage. (Raghu Angadi via cdouglas)
+
+ HADOOP-4455. Added TestSerDe so that unit tests can run successfully.
+ (Ashish Thusoo via dhruba)
+
+ HADOOP-4457. Fixes an input split logging problem introduced by
+ HADOOP-3245. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-4464. Separate out TestFileCreationClient from TestFileCreation.
+ (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-4404. saveFSImage() removes files from a storage directory that do
+ not correspond to its type. (shv)
+
+ HADOOP-4149. Fix handling of updates to the job priority, by changing the
+ list of jobs to be keyed by the priority, submit time, and job tracker id.
+ (Amar Kamat via omalley)
+
+ HADOOP-4296. Fix job client failures by not retiring a job as soon as it
+ is finished. (dhruba)
+
+ HADOOP-4439. Remove configuration variables that aren't usable yet, in
+ particular mapred.tasktracker.tasks.maxmemory and mapred.task.max.memory.
+ (Hemanth Yamijala via omalley)
+
+ HADOOP-4230. Fix for serde2 interface, limit operator, select * operator,
+ UDF trim functions and sampling. (Ashish Thusoo via dhruba)
+
+ HADOOP-4358. No need to truncate access time in INode. Also fixes NPE
+ in CreateEditsLog. (Raghu Angadi)
+
+ HADOOP-4387. TestHDFSFileSystemContract fails on windows nightly builds.
+ (Raghu Angadi)
+
+ HADOOP-4466. Ensure that SequenceFileOutputFormat isn't tied to Writables
+ and can be used with other Serialization frameworks. (Chris Wensel via
+ acmurthy)
+
+ HADOOP-4525. Fix ipc.server.ipcnodelay originally missed in HADOOP-2232.
+ (cdouglas via Clint Morgan)
+
+ HADOOP-4498. Ensure that JobHistory correctly escapes the job name so that
+ regex patterns work. (Chris Wensel via acmurthy)
+
+ HADOOP-4446. Modify guaranteed capacity labels in capacity scheduler's UI
+ to reflect the information being displayed. (Sreekanth Ramakrishnan via
+ yhemanth)
+
+ HADOOP-4282. Some user facing URLs are not filtered by user filters.
+ (szetszwo)
+
+ HADOOP-4595. Fixes two race conditions - one to do with updating free slot count,
+ and another to do with starting the MapEventsFetcher thread. (ddas)
+
+ HADOOP-4552. Fix a deadlock in RPC server. (Raghu Angadi)
+
+ HADOOP-4471. Sort running jobs by priority in the capacity scheduler.
+ (Amar Kamat via yhemanth)
+
+ HADOOP-4500. Fix MultiFileSplit to get the FileSystem from the relevant
+ path rather than the JobClient. (Joydeep Sen Sarma via cdouglas)
+
+Release 0.18.4 - Unreleased
+
+ BUG FIXES
+
+ HADOOP-5114. Remove timeout for accept() in DataNode. This makes accept()
+ fail in JDK on Windows and causes many tests to fail. (Raghu Angadi)
+
+ HADOOP-5192. Block receiver should not remove a block that's created or
+ being written by other threads. (hairong)
+
+ HADOOP-5134. FSNamesystem#commitBlockSynchronization adds under-construction
+ block locations to blocksMap. (Dhruba Borthakur via hairong)
+
+ HADOOP-5412. Simulated DataNode should not write to a block that's being
+ written by another thread. (hairong)
+
+ HADOOP-5465. Fix the problem of blocks remaining under-replicated by
+ providing synchronized modification to the counter xmitsInProgress in
+ DataNode. (hairong)
+
+ HADOOP-5557. Fixes some minor problems in TestOverReplicatedBlocks.
+ (szetszwo)
+
+Release 0.18.3 - 2009-01-27
+
+ IMPROVEMENTS
+
+ HADOOP-4150. Include librecordio in hadoop releases. (Giridharan Kesavan
+ via acmurthy)
+
+ HADOOP-4668. Improve documentation for setCombinerClass to clarify the
+ restrictions on combiners. (omalley)
+
+ BUG FIXES
+
+ HADOOP-4499. DFSClient should invoke checksumOk only once. (Raghu Angadi)
+
+ HADOOP-4597. Calculate mis-replicated blocks when safe-mode is turned
+ off manually. (shv)
+
+ HADOOP-3121. lsr should keep listing the remaining items but not
+ terminate if there is any IOException. (szetszwo)
+
+ HADOOP-4610. Always calculate mis-replicated blocks when safe-mode is
+ turned off. (shv)
+
+ HADOOP-3883. Limit namenode to assign at most one generation stamp for
+ a particular block within a short period. (szetszwo)
+
+ HADOOP-4556. Block went missing. (hairong)
+
+ HADOOP-4643. NameNode should exclude excessive replicas when counting
+ live replicas for a block. (hairong)
+
+ HADOOP-4703. Should not wait for proxy forever in lease recovery.
+ (szetszwo)
+
+ HADOOP-4647. NamenodeFsck should close the DFSClient it has created.
+ (szetszwo)
+
+ HADOOP-4616. Fuse-dfs can handle bad values from FileSystem.read call.
+ (Pete Wyckoff via dhruba)
+
+ HADOOP-4061. Throttle Datanode decommission monitoring in Namenode.
+ (szetszwo)
+
+ HADOOP-4659. Root cause of connection failure is being lost to code that
+ uses it for delaying startup. (Steve Loughran and Hairong via hairong)
+
+ HADOOP-4614. Lazily open segments when merging map spills to avoid using
+ too many file descriptors. (Yuri Pradkin via cdouglas)
+
+ HADOOP-4257. The DFS client should pick only one datanode as the candidate
+ to initiate lease recovery. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-4713. Fix librecordio to handle records larger than 64k. (Christian
+ Kunz via cdouglas)
+
+ HADOOP-4635. Fix a memory leak in fuse dfs. (pete wyckoff via mahadev)
+
+ HADOOP-4714. Report status between merges and make the number of records
+ between progress reports configurable. (Jothi Padmanabhan via cdouglas)
+
+ HADOOP-4726. Fix documentation typos "the the". (Edward J. Yoon via
+ szetszwo)
+
+ HADOOP-4679. Datanode prints tons of log messages: waiting for threadgroup
+ to exit, active threads is XX. (hairong)
+
+ HADOOP-4746. Job output directory should be normalized. (hairong)
+
+ HADOOP-4717. Removal of default port# in NameNode.getUri() caused a
+ map/reduce job to fail to promote temporary output. (hairong)
+
+ HADOOP-4778. Check for zero size block meta file when updating a block.
+ (szetszwo)
+
+ HADOOP-4742. Replica gets deleted by mistake. (Wang Xu via hairong)
+
+ HADOOP-4702. Failed block replication leaves an incomplete block in
+ receiver's tmp data directory. (hairong)
+
+ HADOOP-4613. Fix block browsing on Web UI. (Johan Oskarsson via shv)
+
+ HADOOP-4806. HDFS rename should not use src path as a regular expression.
+ (szetszwo)
+
+ HADOOP-4795. Prevent the lease monitor from getting into an infinite loop
+ when leases and the namespace tree do not match. (szetszwo)
+
+ HADOOP-4620. Fixes Streaming to properly handle map/reduce jobs with empty
+ input/output. (Ravi Gummadi via ddas)
+
+ HADOOP-4857. Fixes TestUlimit to have exactly 1 map in the jobs spawned.
+ (Ravi Gummadi via ddas)
+
+ HADOOP-4810. Data lost at cluster startup time. (hairong)
+
+ HADOOP-4797. Improve how RPC server reads and writes large buffers. Avoids
+ soft-leak of direct buffers and excess copies in NIO layer. (Raghu Angadi)
+
+ HADOOP-4840. TestNodeCount sometimes fails with NullPointerException.
+ (hairong)
+
+ HADOOP-4904. Fix deadlock while leaving safe mode. (shv)
+
+ HADOOP-1980. 'dfsadmin -safemode enter' should prevent the namenode from
+ leaving safemode automatically. (shv)
+
+ HADOOP-4951. Lease monitor should acquire the LeaseManager lock but not the
+ Monitor lock. (szetszwo)
+
+ HADOOP-4935. processMisReplicatedBlocks() should not clear
+ excessReplicateMap. (shv)
+
+ HADOOP-4961. Fix ConcurrentModificationException in lease recovery
+ of empty files. (shv)
+
+ HADOOP-4971. A long (unexpected) delay at datanodes could make subsequent
+ block reports from many datanodes arrive at the same time. (Raghu Angadi)
+
+ HADOOP-4910. NameNode should exclude replicas when choosing excessive
+ replicas to delete to avoid data loss. (hairong)
+
+ HADOOP-4983. Fixes a problem in updating Counters in the status reporting.
+ (Amareshwari Sriramadasu via ddas)
+
+Release 0.18.2 - 2008-11-03
+
+ BUG FIXES
+
+ HADOOP-3614. Fix a bug that Datanode may use an old GenerationStamp to get
+ meta file. (szetszwo)
+
+ HADOOP-4314. Simulated datanodes should not include blocks that are still
+ being written in their block report. (Raghu Angadi)
+
+ HADOOP-4228. dfs datanode metrics, bytes_read and bytes_written, overflow
+ due to incorrect type used. (hairong)
+
+ HADOOP-4395. The FSEditLog loading is incorrect for the case OP_SET_OWNER.
+ (szetszwo)
+
+ HADOOP-4351. FSNamesystem.getBlockLocationsInternal throws
+ ArrayIndexOutOfBoundsException. (hairong)
+
+ HADOOP-4403. Make TestLeaseRecovery and TestFileCreation more robust.
+ (szetszwo)
+
+ HADOOP-4292. Do not support append() for LocalFileSystem. (hairong)
+
+ HADOOP-4399. Make fuse-dfs multi-thread access safe.
+ (Pete Wyckoff via dhruba)
+
+ HADOOP-4369. Use setMetric(...) instead of incrMetric(...) for metrics
+ averages. (Brian Bockelman via szetszwo)
+
+ HADOOP-4469. Rename and add the ant task jar file to the tar file. (nigel)
+
+ HADOOP-3914. DFSClient sends Checksum Ok only once for a block.
+ (Christian Kunz via hairong)
+
+ HADOOP-4467. SerializationFactory now uses the current context ClassLoader
+ allowing for user supplied Serialization instances. (Chris Wensel via
+ acmurthy)
+
+ HADOOP-4517. Release FSDataset lock before joining ongoing create threads.
+ (szetszwo)
+
+ HADOOP-4526. fsck failing with NullPointerException. (hairong)
+
+ HADOOP-4483. Honor the max parameter in DatanodeDescriptor.getBlockArray(..)
+ (Ahad Rana and Hairong Kuang via szetszwo)
+
+ HADOOP-4340. Correctly set the exit code from JobShell.main so that the
+ 'hadoop jar' command returns the right code to the user. (acmurthy)
+
+ NEW FEATURES
+
+ HADOOP-2421. Add jdiff output to documentation, listing all API
+ changes from the prior release. (cutting)
+
+Release 0.18.1 - 2008-09-17
+
+ IMPROVEMENTS
+
+ HADOOP-3934. Upgrade log4j to 1.2.15. (omalley)
+
+ BUG FIXES
+
+ HADOOP-3995. In case of quota failure on HDFS, rename does not restore
+ source filename. (rangadi)
+
+ HADOOP-3821. Prevent SequenceFile and IFile from duplicating codecs in
+ CodecPool when closed more than once. (Arun Murthy via cdouglas)
+
+ HADOOP-4040. Remove the hard-coded default of the IPC idle connection timeout
+ from the TaskTracker, which was causing HDFS client connections to not be
+ collected. (ddas via omalley)
+
+ HADOOP-4046. Made WritableComparable's constructor protected instead of
+ private to re-enable class derivation. (cdouglas via omalley)
+
+ HADOOP-3940. Fix in-memory merge condition to wait when there are no map
+ outputs or when the final map outputs are being fetched without contention.
+ (cdouglas)
+
+Release 0.18.0 - 2008-08-19
+
+ INCOMPATIBLE CHANGES
+
+ HADOOP-2703. The default options to fsck skip checking files
+ that are being written to. The output of fsck is incompatible
+ with the previous release. (lohit vijayarenu via dhruba)
+
+ HADOOP-2865. FsShell.ls() printout format changed to print file names
+ at the end of the line. (Edward J. Yoon via shv)
+
+ HADOOP-3283. The Datanode has an RPC server. It currently supports
+ two RPCs: the first RPC retrieves the metadata about a block and the
+ second RPC sets the generation stamp of an existing block.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2797. Code related to upgrading to 0.14 (Block CRCs) is
+ removed. As a result, upgrade to 0.18 or later from 0.13 or earlier
+ is not supported. If upgrading from 0.13 or earlier is required,
+ please upgrade to an intermediate version (0.14-0.17) and then
+ to this version. (rangadi)
+
+ HADOOP-544. This issue introduces new classes JobID, TaskID and
+ TaskAttemptID, which should be used instead of their string counterparts.
+ Functions in JobClient, TaskReport, RunningJob, jobcontrol.Job and
+ TaskCompletionEvent that use string arguments are deprecated in favor
+ of the corresponding ones that use ID objects. Applications can use
+ xxxID.toString() and xxxID.forName() methods to convert/restore objects
+ to/from strings. (Enis Soztutar via ddas)
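+
+ For illustration, a minimal sketch of the round trip described above (the
+ job ID string is hypothetical):
+
+     import org.apache.hadoop.mapred.JobID;
+
+     public class JobIdDemo {
+       public static void main(String[] args) {
+         // Restore a typed ID from its string counterpart.
+         JobID id = JobID.forName("job_200805180001_0001");
+         // And convert it back to a string.
+         System.out.println(id.toString());
+       }
+     }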
+
+ HADOOP-2188. RPC client sends a ping rather than throwing timeouts.
+ RPC server does not throw away old RPCs. If clients and the server are on
+ different versions, they are not able to function well. In addition,
+ the property ipc.client.timeout is removed from the default hadoop
+ configuration. It also removes metrics RpcOpsDiscardedOPsNum. (hairong)
+
+ HADOOP-2181. This issue adds logging for input splits in Jobtracker log
+ and jobHistory log. Also adds web UI for viewing input splits in job UI
+ and history UI. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3226. Run combiners multiple times over map outputs as they
+ are merged in both the map and the reduce tasks. (cdouglas via omalley)
+
+ HADOOP-3329. DatanodeDescriptor objects should not be stored in the
+ fsimage. (dhruba)
+
+ HADOOP-2656. The Block object has a generation stamp inside it.
+ Existing blocks get a generation stamp of 0. This is needed to support
+ appends. (dhruba)
+
+ HADOOP-3390. Removed deprecated ClientProtocol.abandonFileInProgress().
+ (Tsz Wo (Nicholas), SZE via rangadi)
+
+ HADOOP-3405. Made some map/reduce internal classes non-public:
+ MapTaskStatus, ReduceTaskStatus, JobSubmissionProtocol,
+ CompletedJobStatusStore. (enis via omalley)
+
+ HADOOP-3265. Removed deprecated API getFileCacheHints().
+ (Lohit Vijayarenu via rangadi)
+
+ HADOOP-3310. The namenode instructs the primary datanode to do lease
+ recovery. The block gets a new generation stamp.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2909. Improve IPC idle connection management. Property
+ ipc.client.maxidletime is removed from the default configuration,
+ instead it is defined as twice the ipc.client.connection.maxidletime.
+ A connection with outstanding requests won't be treated as idle.
+ (hairong)
+
+ HADOOP-3459. Change in the output format of dfs -ls to more closely match
+ /bin/ls. New format is: perm repl owner group size date name
+ (Mukund Madhugiri via omalley)
+
+ HADOOP-3113. An fsync invoked on an HDFS file now really
+ persists data! The datanode moves blocks in the tmp directory to
+ the real block directory on a datanode-restart. (dhruba)
+
+ HADOOP-3452. Change fsck to return non-zero status for a corrupt
+ FileSystem. (lohit vijayarenu via cdouglas)
+
+ HADOOP-3193. Include the address of the client that found the corrupted
+ block in the log. Also include a CorruptedBlocks metric to track the size
+ of the corrupted block map. (cdouglas)
+
+ HADOOP-3512. Separate out the tools into a tools jar. (omalley)
+
+ HADOOP-3598. Ensure that temporary task-output directories are not created
+ if they are not necessary e.g. for Maps with no side-effect files.
+ (acmurthy)
+
+ HADOOP-3665. Modify WritableComparator so that it only creates instances
+ of the keytype if the type does not define a WritableComparator. Calling
+ the superclass compare will throw a NullPointerException. Also define
+ a RawComparator for NullWritable and permit it to be written as a key
+ to SequenceFiles. (cdouglas)
+
+ HADOOP-3673. Avoid deadlock caused by DataNode RPC recoverBlock().
+ (Tsz Wo (Nicholas), SZE via rangadi)
+
+ NEW FEATURES
+
+ HADOOP-3074. Provides a UrlStreamHandler for DFS and other FS,
+ relying on FileSystem. (taton)
+
+ HADOOP-2585. Name-node imports namespace data from a recent checkpoint
+ accessible via an NFS mount. (shv)
+
+ HADOOP-3061. Writable types for doubles and bytes. (Andrzej
+ Bialecki via omalley)
+
+ HADOOP-2857. Allow libhdfs to set jvm options. (Craig Macdonald
+ via omalley)
+
+ HADOOP-3317. Add default port for HDFS namenode. The port in
+ "hdfs:" URIs now defaults to 8020, so that one may simply use URIs
+ of the form "hdfs://example.com/dir/file". (cutting)
+
+ HADOOP-2019. Adds support for .tar, .tgz and .tar.gz files in
+ DistributedCache (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3058. Add FSNamesystem status metrics.
+ (Lohit Vijayarenu via rangadi)
+
+ HADOOP-1915. Allow users to specify counters via strings instead
+ of enumerations. (tomwhite via omalley)
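+
+ A sketch of a string-based counter in an old-API mapper; the group and
+ counter names are made up for illustration:
+
+     import java.io.IOException;
+     import org.apache.hadoop.io.*;
+     import org.apache.hadoop.mapred.*;
+
+     public class CountingMapper extends MapReduceBase
+         implements Mapper<LongWritable, Text, Text, IntWritable> {
+       public void map(LongWritable key, Text value,
+                       OutputCollector<Text, IntWritable> out,
+                       Reporter reporter) throws IOException {
+         // Group and counter are plain strings, no enum declaration needed.
+         reporter.incrCounter("MyApp", "RECORDS_SEEN", 1);
+         out.collect(value, new IntWritable(1));
+       }
+     }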
+
+ HADOOP-2065. Delay invalidating corrupt replicas of a block until it
+ is removed from the under-replicated state. If all replicas are found to
+ be corrupt, retain all copies and mark the block as corrupt.
+ (Lohit Vijayarenu via rangadi)
+
+ HADOOP-3221. Adds org.apache.hadoop.mapred.lib.NLineInputFormat, which
+ splits files into splits of N lines each. N can be specified by the
+ configuration property "mapred.line.input.format.linespermap", which
+ defaults to 1. (Amareshwari Sriramadasu via ddas)
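+
+ A minimal configuration sketch, assuming a JobConf-based job setup:
+
+     import org.apache.hadoop.mapred.JobConf;
+     import org.apache.hadoop.mapred.lib.NLineInputFormat;
+
+     public class NLineDemo {
+       public static void main(String[] args) {
+         JobConf conf = new JobConf(NLineDemo.class);
+         conf.setInputFormat(NLineInputFormat.class);
+         // Each map task now receives 10 lines instead of the default 1.
+         conf.setInt("mapred.line.input.format.linespermap", 10);
+       }
+     }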
+
+ HADOOP-3336. Direct a subset of annotated FSNamesystem calls for audit
+ logging. (cdouglas)
+
+ HADOOP-3400. A new API FileSystem.deleteOnExit() that facilitates
+ handling of temporary files in HDFS. (dhruba)
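+
+ A minimal sketch of the new call; the scratch path is hypothetical:
+
+     import org.apache.hadoop.conf.Configuration;
+     import org.apache.hadoop.fs.FileSystem;
+     import org.apache.hadoop.fs.Path;
+
+     public class TempFileDemo {
+       public static void main(String[] args) throws Exception {
+         FileSystem fs = FileSystem.get(new Configuration());
+         // Mark a temporary path for deletion when this client exits.
+         fs.deleteOnExit(new Path("/tmp/myapp/scratch"));
+       }
+     }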
+
+ HADOOP-4. Add fuse-dfs to contrib, permitting one to mount an
+ HDFS filesystem on systems that support FUSE, e.g., Linux.
+ (Pete Wyckoff via cutting)
+
+ HADOOP-3246. Add FTPFileSystem. (Ankur Goel via cutting)
+
+ HADOOP-3250. Extend FileSystem API to allow appending to files.
+ (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-3177. Implement Syncable interface for FileSystem.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-1328. Implement user counters in streaming. (tomwhite via
+ omalley)
+
+ HADOOP-3187. Quotas for namespace management. (Hairong Kuang via ddas)
+
+ HADOOP-3307. Support for Archives in Hadoop. (Mahadev Konar via ddas)
+
+ HADOOP-3460. Add SequenceFileAsBinaryOutputFormat to permit direct
+ writes of serialized data. (Koji Noguchi via cdouglas)
+
+ HADOOP-3230. Add ability to get counter values from command
+ line. (tomwhite via omalley)
+
+ HADOOP-930. Add support for native S3 files. (tomwhite via cutting)
+
+ HADOOP-3502. Quota API needs documentation in Forrest. (hairong)
+
+ HADOOP-3413. Allow SequenceFile.Reader to use serialization
+ framework. (tomwhite via omalley)
+
+ HADOOP-3541. Import of the namespace from a checkpoint documented
+ in hadoop user guide. (shv)
+
+ IMPROVEMENTS
+
+ HADOOP-3677. Simplify generation stamp upgrade by making it a
+ local upgrade on datanodes. Deleted the distributed upgrade.
+ (rangadi)
+
+ HADOOP-2928. Remove deprecated FileSystem.getContentLength().
+ (Lohit Vijayarenu via rangadi)
+
+ HADOOP-3130. Make the connect timeout smaller for getFile.
+ (Amar Ramesh Kamat via ddas)
+
+ HADOOP-3160. Remove deprecated exists() from ClientProtocol and
+ FSNamesystem. (Lohit Vijayarenu via rangadi)
+
+ HADOOP-2910. Throttle IPC Clients during bursts of requests or
+ server slowdown. Clients retry connection for up to 15 minutes
+ when socket connection times out. (hairong)
+
+ HADOOP-3295. Allow TextOutputFormat to use configurable separators.
+ (Zheng Shao via cdouglas)
+
+ HADOOP-3308. Improve QuickSort by excluding values equal to the pivot from the
+ partition. (cdouglas)
+
+ HADOOP-2461. Trim property names in configuration.
+ (Tsz Wo (Nicholas), SZE via shv)
+
+ HADOOP-2799. Deprecate o.a.h.io.Closeable in favor of java.io.Closeable.
+ (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-3345. Enhance the hudson-test-patch target to cleanup messages,
+ fix minor defects, and add eclipse plugin and python unit tests. (nigel)
+
+ HADOOP-3144. Improve robustness of LineRecordReader by defining a maximum
+ line length (mapred.linerecordreader.maxlength), thereby avoiding reading
+ too far into the following split. (Zheng Shao via cdouglas)
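+
+ A configuration sketch; the 1 MB cap is an arbitrary example value:
+
+     import org.apache.hadoop.mapred.JobConf;
+
+     public class LineLimitDemo {
+       public static void main(String[] args) {
+         JobConf conf = new JobConf();
+         // Bound the bytes scanned for a single line so the reader does
+         // not run past the end of its split.
+         conf.setInt("mapred.linerecordreader.maxlength", 1024 * 1024);
+       }
+     }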
+
+ HADOOP-3334. Move lease handling from FSNamesystem into a separate class.
+ (Tsz Wo (Nicholas), SZE via rangadi)
+
+ HADOOP-3332. Reduces the amount of logging in Reducer's shuffle phase.
+ (Devaraj Das)
+
+ HADOOP-3355. Enhances Configuration class to accept hex numbers for getInt
+ and getLong. (Amareshwari Sriramadasu via ddas)
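+
+ A small sketch of the hex support; the property name is made up:
+
+     import org.apache.hadoop.conf.Configuration;
+
+     public class HexConfDemo {
+       public static void main(String[] args) {
+         Configuration conf = new Configuration();
+         conf.set("my.buffer.size", "0x10000");  // hypothetical property
+         // getInt now understands the hex prefix: prints 65536.
+         System.out.println(conf.getInt("my.buffer.size", 0));
+       }
+     }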
+
+ HADOOP-3350. Add an argument to distcp to permit the user to limit the
+ number of maps. (cdouglas)
+
+ HADOOP-3013. Add corrupt block reporting to fsck.
+ (lohit vijayarenu via cdouglas)
+
+ HADOOP-3377. Remove TaskRunner::replaceAll and replace with equivalent
+ String::replace. (Brice Arnould via cdouglas)
+
+ HADOOP-3398. Minor improvement to a utility function that participates
+ in backoff calculation. (cdouglas)
+
+ HADOOP-3381. Clear references when directories are deleted so that the
+ effect of memory leaks is not multiplied. (rangadi)
+
+ HADOOP-2867. Adds the task's CWD to its LD_LIBRARY_PATH.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3232. The DU class runs the 'du' command in a separate thread so
+ that it does not block the user. Otherwise the DataNode misses heartbeats
+ on large nodes. (Johan Oskarsson via rangadi)
+
+ HADOOP-3035. During block transfers between datanodes, the receiving
+ datanode can now report corrupt replicas received from the source node to
+ the namenode. (Lohit Vijayarenu via rangadi)
+
+ HADOOP-3434. Retain the cause of the bind failure in Server::bind.
+ (Steve Loughran via cdouglas)
+
+ HADOOP-3429. Increases the size of the buffers used for communication
+ in Streaming jobs. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3486. Change default for initial block report to 0 seconds
+ and document it. (Sanjay Radia via omalley)
+
+ HADOOP-3448. Improve the text in the assertion making sure the
+ layout versions are consistent in the data node. (Steve Loughran
+ via omalley)
+
+ HADOOP-2095. Improve the Map-Reduce shuffle/merge by cutting down
+ buffer-copies; changed intermediate sort/merge to use the new IFile format
+ rather than SequenceFiles and compression of map-outputs is now
+ implemented by compressing the entire file rather than SequenceFile
+ compression. Shuffle also has been changed to use a simple byte-buffer
+ manager rather than the InMemoryFileSystem.
+ Configuration changes to hadoop-default.xml:
+ deprecated mapred.map.output.compression.type
+ (acmurthy)
+
+ HADOOP-236. JobTracker now refuses connections from a task tracker with a
+ different version number. (Sharad Agarwal via ddas)
+
+ HADOOP-3427. Improves the shuffle scheduler. It now waits for notifications
+ from shuffle threads when it has scheduled enough, before scheduling more.
+ (ddas)
+
+ HADOOP-2393. Moves the handling of dir deletions in the tasktracker to
+ a separate thread. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3501. Deprecate InMemoryFileSystem. (cutting via omalley)
+
+ HADOOP-3366. Stall the shuffle while in-memory merge is in progress.
+ (acmurthy)
+
+ HADOOP-2916. Refactor src structure, but leave package structure alone.
+ (Raghu Angadi via mukund)
+
+ HADOOP-3492. Add forrest documentation for user archives.
+ (Mahadev Konar via hairong)
+
+ HADOOP-3467. Improve documentation for FileSystem::deleteOnExit.
+ (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-3379. Documents stream.non.zero.exit.status.is.failure for Streaming.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3096. Improves documentation about the Task Execution Environment in
+ the Map-Reduce tutorial. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-2984. Add forrest documentation for DistCp. (cdouglas)
+
+ HADOOP-3406. Add forrest documentation for Profiling.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-2762. Add forrest documentation for controls of memory limits on
+ hadoop daemons and Map-Reduce tasks. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3535. Fix documentation and name of IOUtils.close to
+ reflect that it should only be used in cleanup contexts. (omalley)
+
+ HADOOP-3593. Updates the mapred tutorial. (ddas)
+
+ HADOOP-3547. Documents the way in which native libraries can be distributed
+ via the DistributedCache. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3606. Updates the Streaming doc. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3532. Add jdiff reports to the build scripts. (omalley)
+
+ HADOOP-3100. Develop tests to test the DFS command line interface. (mukund)
+
+ HADOOP-3688. Fix up HDFS docs. (Robert Chansler via hairong)
+
+ OPTIMIZATIONS
+
+ HADOOP-3274. The default constructor of BytesWritable creates an empty
+ byte array. (Tsz Wo (Nicholas), SZE via shv)
+
+ HADOOP-3272. Remove redundant copy of Block object in BlocksMap.
+ (Lohit Vijayarenu via shv)
+
+ HADOOP-3164. Reduce DataNode CPU usage by using FileChannel.transferTo().
+ On Linux DataNode takes 5 times less CPU while serving data. Results may
+ vary on other platforms. (rangadi)
+
+ HADOOP-3248. Optimization of saveFSImage. (Dhruba via shv)
+
+ HADOOP-3297. Fetch more task completion events from the job
+ tracker and task tracker. (ddas via omalley)
+
+ HADOOP-3364. Faster image and log edits loading. (shv)
+
+ HADOOP-3369. Fast block processing during name-node startup. (shv)
+
+ HADOOP-1702. Reduce buffer copies when data is written to DFS.
+ DataNodes take 30% less CPU while writing data. (rangadi)
+
+ HADOOP-3095. Speed up split generation in the FileInputSplit,
+ especially for non-HDFS file systems. Deprecates
+ InputFormat.validateInput. (tomwhite via omalley)
+
+ HADOOP-3552. Add forrest documentation for Hadoop commands.
+ (Sharad Agarwal via cdouglas)
+
+ BUG FIXES
+
+ HADOOP-2905. 'fsck -move' triggers NPE in NameNode.
+ (Lohit Vijayarenu via rangadi)
+
+ Increment ClientProtocol.versionID missed by HADOOP-2585. (shv)
+
+ HADOOP-3254. Restructure internal namenode methods that process
+ heartbeats to use well-defined BlockCommand object(s) instead of
+ using the base java Object. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-3176. Change lease record when an open-for-write file
+ gets renamed. (dhruba)
+
+ HADOOP-3269. Fix a case where the namenode fails to restart
+ while processing a lease record. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-3282. Port issues in TestCheckpoint resolved. (shv)
+
+ HADOOP-3268. file:// URLs issue in TestUrlStreamHandler under Windows.
+ (taton)
+
+ HADOOP-3127. Deleting files in trash should really remove them.
+ (Brice Arnould via omalley)
+
+ HADOOP-3300. Fix locking of explicit locks in NetworkTopology.
+ (tomwhite via omalley)
+
+ HADOOP-3270. Constant DatanodeCommands are stored in static final
+ immutable variables for better code clarity.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2793. Fix broken links for worst performing shuffle tasks in
+ the job history page. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3313. Avoid unnecessary calls to System.currentTimeMillis
+ in RPC::Invoker. (cdouglas)
+
+ HADOOP-3318. Recognize "Darwin" as an alias for "Mac OS X" to
+ support Soylatte. (Sam Pullara via omalley)
+
+ HADOOP-3301. Fix misleading error message when S3 URI hostname
+ contains an underscore. (tomwhite via omalley)
+
+ HADOOP-3338. Fix Eclipse plugin to compile after HADOOP-544 was
+ committed. Updated all references to use the new JobID representation.
+ (taton via nigel)
+
+ HADOOP-3337. Loading FSEditLog was broken by HADOOP-3283 since it
+ changed Writable serialization of DatanodeInfo. This patch handles it.
+ (Tsz Wo (Nicholas), SZE via rangadi)
+
+ HADOOP-3101. Prevent JobClient from throwing an exception when printing
+ usage. (Edward J. Yoon via cdouglas)
+
+ HADOOP-3119. Update javadoc for Text::getBytes to better describe its
+ behavior. (Tim Nelson via cdouglas)
+
+ HADOOP-2294. Fix documentation in libhdfs to refer to the correct free
+ function. (Craig Macdonald via cdouglas)
+
+ HADOOP-3335. Prevent the libhdfs build from deleting the wrong
+ files on make clean. (cutting via omalley)
+
+ HADOOP-2930. Make {start,stop}-balancer.sh work even if hadoop-daemon.sh
+ is not in the PATH. (Spiros Papadimitriou via hairong)
+
+ HADOOP-3085. Catch Exception in metrics util classes to ensure that
+ misconfigured metrics don't prevent others from updating. (cdouglas)
+
+ HADOOP-3299. CompositeInputFormat should configure the sub-input
+ formats. (cdouglas via omalley)
+
+ HADOOP-3309. Lower io.sort.mb and fs.inmemory.size.mb for MiniMRDFSSort
+ unit test so it passes on Windows. (lohit vijayarenu via cdouglas)
+
+ HADOOP-3348. TestUrlStreamHandler should set URLStreamFactory after
+ DataNodes are initialized. (Lohit Vijayarenu via rangadi)
+
+ HADOOP-3371. Ignore InstanceAlreadyExistsException from
+ MBeanUtil::registerMBean. (lohit vijayarenu via cdouglas)
+
+ HADOOP-3349. A file rename was incorrectly changing the name inside a
+ lease record. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-3365. Removes an unnecessary copy of the key from SegmentDescriptor
+ to MergeQueue. (Devaraj Das)
+
+ HADOOP-3388. Fix for TestDatanodeBlockScanner to handle blocks with
+ generation stamps in them. (dhruba)
+
+ HADOOP-3203. Fixes TaskTracker::localizeJob to pass correct file sizes
+ for the jarfile and the jobfile. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3391. Fix a findbugs warning introduced by HADOOP-3248. (rangadi)
+
+ HADOOP-3393. Fix datanode shutdown to call DataBlockScanner::shutdown and
+ close its log, even if the scanner thread is not running. (lohit vijayarenu
+ via cdouglas)
+
+ HADOOP-3399. A debug message was logged at info level. (rangadi)
+
+ HADOOP-3396. TestDatanodeBlockScanner occasionally fails.
+ (Lohit Vijayarenu via rangadi)
+
+ HADOOP-3339. Some of the failures on the 3rd datanode in the DFS write
+ pipeline are not detected properly. This could lead to a hard failure of
+ the client's write operation. (rangadi)
+
+ HADOOP-3409. Namenode should save the root inode into fsimage. (hairong)
+
+ HADOOP-3296. Fix task cache to work for more than two levels in the cache
+ hierarchy. This also adds a new counter to track cache hits at levels
+ greater than two. (Amar Kamat via cdouglas)
+
+ HADOOP-3375. Lease paths were sometimes not removed from
+ LeaseManager.sortedLeasesByPath. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-3424. Values returned by getPartition should be checked to
+ make sure they are in the range 0 to #reduces - 1. (cdouglas via
+ omalley)
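+
+ For context, a common way a custom old-API partitioner keeps its result in
+ the required 0..numPartitions-1 range (a sketch, not the patch itself):
+
+     import org.apache.hadoop.io.IntWritable;
+     import org.apache.hadoop.io.Text;
+     import org.apache.hadoop.mapred.JobConf;
+     import org.apache.hadoop.mapred.Partitioner;
+
+     public class SafePartitioner implements Partitioner<Text, IntWritable> {
+       public void configure(JobConf job) {}
+       public int getPartition(Text key, IntWritable value, int numPartitions) {
+         // Mask the sign bit so the modulo result is never negative.
+         return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
+       }
+     }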
+
+ HADOOP-3408. Change FSNamesystem to send its metrics as integers to
+ accommodate collectors that don't support long values. (lohit vijayarenu
+ via cdouglas)
+
+ HADOOP-3403. Fixes a problem in the JobTracker to do with handling of lost
+ tasktrackers. (Arun Murthy via ddas)
+
+ HADOOP-1318. Completed maps are not failed if the number of reducers is
+ zero. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3351. Fixes the history viewer tool to not do huge StringBuffer
+ allocations. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3419. Fixes TestFsck to wait for updates to happen before
+ checking results to make the test more reliable. (Lohit Vijaya
+ Renu via omalley)
+
+ HADOOP-3259. Makes failure to read system properties due to a
+ security manager non-fatal. (Edward Yoon via omalley)
+
+ HADOOP-3451. Update libhdfs to use FileSystem::getFileBlockLocations
+ instead of removed getFileCacheHints. (lohit vijayarenu via cdouglas)
+
+ HADOOP-3401. Update FileBench to set the new
+ "mapred.work.output.dir" property to work post-3041. (cdouglas via omalley)
+
+ HADOOP-2669. DFSClient locks pendingCreates appropriately. (dhruba)
+
+ HADOOP-3410. Fix KFS implementation to return correct file
+ modification time. (Sriram Rao via cutting)
+
+ HADOOP-3340. Fix DFS metrics for BlocksReplicated, HeartbeatsNum, and
+ BlockReportsAverageTime. (lohit vijayarenu via cdouglas)
+
+ HADOOP-3435. Remove the assumption in the scripts that bash is at
+ /bin/bash and fix the test patch to require bash instead of sh.
+ (Brice Arnould via omalley)
+
+ HADOOP-3471. Fix spurious errors from TestIndexedSort and add additional
+ logging to let failures be reproducible. (cdouglas)
+
+ HADOOP-3443. Avoid copying map output across partitions when renaming a
+ single spill. (omalley via cdouglas)
+
+ HADOOP-3454. Fix Text::find to search only valid byte ranges. (Chad Whipkey
+ via cdouglas)
+
+ HADOOP-3417. Removes the static configuration variable,
+ commandLineConfig from JobClient. Moves the cli parsing from
+ JobShell to GenericOptionsParser. Thus removes the class
+ org.apache.hadoop.mapred.JobShell. (Amareshwari Sriramadasu via
+ ddas)
+
+ HADOOP-2132. Only RUNNING/PREP jobs can be killed. (Jothi Padmanabhan
+ via ddas)
+
+ HADOOP-3476. Code cleanup in fuse-dfs.
+ (Peter Wyckoff via dhruba)
+
+ HADOOP-2427. Ensure that the cwd of completed tasks is cleaned-up
+ correctly on task-completion. (Amareshwari Sri Ramadasu via acmurthy)
+
+ HADOOP-2565. Remove DFSPath cache of FileStatus.
+ (Tsz Wo (Nicholas), SZE via hairong)
+
+ HADOOP-3326. Cleanup the local-fs and in-memory merge in the ReduceTask by
+ spawning only one thread each for the on-disk and in-memory merge.
+ (Sharad Agarwal via acmurthy)
+
+ HADOOP-3493. Fix TestStreamingFailure to use FileUtil.fullyDelete to
+ ensure correct cleanup. (Lohit Vijayarenu via acmurthy)
+
+ HADOOP-3455. Fix NPE in ipc.Client in case of connection failure and
+ improve its synchronization. (hairong)
+
+ HADOOP-3240. Fix a testcase to not create files in the current directory.
+ Instead the file is created in the test directory. (Mahadev Konar via ddas)
+
+ HADOOP-3496. Fix failure in TestHarFileSystem.testArchives due to change
+ in HADOOP-3095. (tomwhite)
+
+ HADOOP-3135. Get the system directory from the JobTracker instead of from
+ the conf. (Subramaniam Krishnan via ddas)
+
+ HADOOP-3503. Fix a race condition when client and namenode start
+ simultaneous recovery of the same block. (dhruba & Tsz Wo
+ (Nicholas), SZE)
+
+ HADOOP-3440. Fixes DistributedCache to not create symlinks for paths which
+ don't have fragments even when createSymLink is true.
+ (Abhijit Bagri via ddas)
+
+ HADOOP-3463. Hadoop-daemons script should cd to $HADOOP_HOME. (omalley)
+
+ HADOOP-3489. Fix NPE in SafeModeMonitor. (Lohit Vijayarenu via shv)
+
+ HADOOP-3509. Fix NPE in FSNamesystem.close. (Tsz Wo (Nicholas), SZE via
+ shv)
+
+ HADOOP-3491. Name-node shutdown causes InterruptedException in
+ ResolutionMonitor. (Lohit Vijayarenu via shv)
+
+ HADOOP-3511. Fixes namenode image to not set the root's quota to an
+ invalid value when the quota was not saved in the image. (hairong)
+
+ HADOOP-3516. Ensure the JobClient in HadoopArchives is initialized
+ with a configuration. (Subramaniam Krishnan via omalley)
+
+ HADOOP-3513. Improve NNThroughputBenchmark log messages. (shv)
+
+ HADOOP-3519. Fix NPE in DFS FileSystem rename. (hairong via tomwhite)
+
+ HADOOP-3528. The FilesCreated and files_deleted metrics
+ do not match. (Lohit via Mahadev)
+
+ HADOOP-3418. When a directory is deleted, any leases that point to files
+ in the subdirectory are removed. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-3542. Disables the creation of the _logs directory for the archives
+ directory. (Mahadev Konar via ddas)
+
+ HADOOP-3544. Fixes a documentation issue for hadoop archives.
+ (Mahadev Konar via ddas)
+
+ HADOOP-3517. Fixes a problem in the reducer due to which the last InMemory
+ merge may be missed. (Arun Murthy via ddas)
+
+ HADOOP-3548. Fixes build.xml to copy all *.jar files to the dist.
+ (Owen O'Malley via ddas)
+
+ HADOOP-3363. Fix unformatted storage detection in FSImage. (shv)
+
+ HADOOP-3560. Fixes a problem to do with split creation in archives.
+ (Mahadev Konar via ddas)
+
+ HADOOP-3545. Fixes an overflow problem in archives.
+ (Mahadev Konar via ddas)
+
+ HADOOP-3561. Prevent the trash from deleting its parent directories.
+ (cdouglas)
+
+ HADOOP-3575. Fix the clover ant target after package refactoring.
+ (Nigel Daley via cdouglas)
+
+ HADOOP-3539. Fix the tool path in the bin/hadoop script under
+ cygwin. (Tsz Wo (Nicholas), Sze via omalley)
+
+ HADOOP-3520. TestDFSUpgradeFromImage triggers a race condition in the
+ Upgrade Manager. Fixed. (dhruba)
+
+ HADOOP-3586. Provide deprecated, backwards-compatible semantics for the
+ combiner to be run once and only once on each record. (cdouglas)
+
+ HADOOP-3533. Add deprecated methods to provide API compatibility
+ between 0.18 and 0.17. Remove the deprecated methods in trunk. (omalley)
+
+ HADOOP-3580. Fixes a problem to do with specifying a har as an input to
+ a job. (Mahadev Konar via ddas)
+
+ HADOOP-3333. Don't assign a task to a tasktracker that it failed to
+ execute earlier (used to happen in the case of lost tasktrackers where
+ the tasktracker would reinitialize and bind to a different port).
+ (Jothi Padmanabhan and Arun Murthy via ddas)
+
+ HADOOP-3534. Log IOExceptions that happen in closing the name
+ system when the NameNode shuts down. (Tsz Wo (Nicholas) Sze via omalley)
+
+ HADOOP-3546. TaskTracker re-initialization gets stuck in cleaning up.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3576. Fix NullPointerException when renaming a directory
+ to its subdirectory. (Tsz Wo (Nicholas), SZE via hairong)
+
+ HADOOP-3320. Fix NullPointerException in NetworkTopology.getDistance().
+ (hairong)
+
+ HADOOP-3569. KFS input stream read() now correctly reads 1 byte
+ instead of 4. (Sriram Rao via omalley)
+
+ HADOOP-3599. Fix JobConf::setCombineOnceOnly to modify the instance rather
+ than a parameter. (Owen O'Malley via cdouglas)
+
+ HADOOP-3590. Null pointer exception in JobTracker when the task tracker is
+ not yet resolved. (Amar Ramesh Kamat via ddas)
+
+ HADOOP-3603. Fix MapOutputCollector to spill when io.sort.spill.percent is
+ 1.0 and to detect spills when emitted records write no data. (cdouglas)
+
+ HADOOP-3615. Set DatanodeProtocol.versionID to the correct value.
+ (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-3559. Fix the libhdfs test script and config to work with the
+ current semantics. (lohit vijayarenu via cdouglas)
+
+ HADOOP-3480. Need to update Eclipse template to reflect current trunk.
+ (Brice Arnould via tomwhite)
+
+ HADOOP-3588. Fixed usability issues with archives. (mahadev)
+
+ HADOOP-3635. Uncaught exception in DataBlockScanner.
+ (Tsz Wo (Nicholas), SZE via hairong)
+
+ HADOOP-3639. Exception when closing DFSClient while multiple files are
+ open. (Benjamin Gufler via hairong)
+
+ HADOOP-3572. SetQuotas usage interface has some minor bugs. (hairong)
+
+ HADOOP-3649. Fix bug in removing blocks from the corrupted block map.
+ (Lohit Vijayarenu via shv)
+
+ HADOOP-3604. Work around a JVM synchronization problem observed while
+ retrieving the address of direct buffers from compression code by obtaining
+ a lock during this call. (Arun C Murthy via cdouglas)
+
+ HADOOP-3683. Fix dfs metrics to count file listings rather than files
+ listed. (lohit vijayarenu via cdouglas)
+
+ HADOOP-3597. Fix SortValidator to use filesystems other than the default as
+ input. Validation job still runs on default fs.
+ (Jothi Padmanabhan via cdouglas)
+
+ HADOOP-3693. Fix archives, distcp and native library documentation to
+ conform to style guidelines. (Amareshwari Sriramadasu via cdouglas)
+
+ HADOOP-3653. Fix test-patch target to properly account for Eclipse
+ classpath jars. (Brice Arnould via nigel)
+
+ HADOOP-3692. Fix documentation for Cluster setup and Quick start guides.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3691. Fix streaming and tutorial docs. (Jothi Padmanabhan via ddas)
+
+ HADOOP-3630. Fix NullPointerException in CompositeRecordReader from empty
+ sources (cdouglas)
+
+ HADOOP-3706. Fix a ClassLoader issue in the mapred.join Parser that
+ prevents it from loading user-specified InputFormats.
+ (Jingkei Ly via cdouglas)
+
+ HADOOP-3718. Fix KFSOutputStream::write(int) to output a byte instead of
+ an int, per the OutputStream contract. (Sriram Rao via cdouglas)
+
+ HADOOP-3647. Add debug logs to help track down a very occasional,
+ hard-to-reproduce bug in shuffle/merge on the reducer. (acmurthy)
+
+ HADOOP-3716. Prevent listStatus in KosmosFileSystem from returning
+ null for valid, empty directories. (Sriram Rao via cdouglas)
+
+ HADOOP-3752. Fix audit logging to record rename events. (cdouglas)
+
+ HADOOP-3737. Fix CompressedWritable to call Deflater::end to release
+ compressor memory. (Grant Glouser via cdouglas)
+
+ HADOOP-3670. Fixes JobTracker to clear out split bytes when no longer
+ required. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3755. Update gridmix to work with HOD 0.4 (Runping Qi via cdouglas)
+
+ HADOOP-3743. Fix -libjars, -files, -archives options to work even if
+ user code does not implement Tool. (Amareshwari Sriramadasu via mahadev)
+
+ HADOOP-3774. Fix typos in shell output. (Tsz Wo (Nicholas), SZE via
+ cdouglas)
+
+ HADOOP-3762. Fixed FileSystem cache to work with the default port. (cutting
+ via omalley)
+
+ HADOOP-3798. Fix tests compilation. (Mukund Madhugiri via omalley)
+
+ HADOOP-3794. Return modification time instead of zero for KosmosFileSystem.
+ (Sriram Rao via cdouglas)
+
+ HADOOP-3806. Remove debug statement to stdout from QuickSort. (cdouglas)
+
+ HADOOP-3776. Fix NPE at NameNode when datanode reports a block after it is
+ deleted at NameNode. (rangadi)
+
+ HADOOP-3537. Disallow adding a datanode to a network topology when its
+ network location is not resolved. (hairong)
+
+ HADOOP-3571. Fix bug in block removal used in lease recovery. (shv)
+
+ HADOOP-3645. MetricsTimeVaryingRate returns wrong value for
+ metric_avg_time. (Lohit Vijayarenu via hairong)
+
+ HADOOP-3521. Restored the cast to float for sending Counters' values
+ to Hadoop metrics which was removed by HADOOP-544. (acmurthy)
+
+ HADOOP-3820. Fixes two problems in the gridmix-env - a syntax error, and a
+ wrong definition of USE_REAL_DATASET by default. (Arun Murthy via ddas)
+
+ HADOOP-3724. Fixes two problems related to storing and recovering lease
+ in the fsimage. (dhruba)
+
+ HADOOP-3827. Fixed compression of empty map-outputs. (acmurthy)
+
+ HADOOP-3865. Remove reference to FSNamesystem from metrics preventing
+ garbage collection. (Lohit Vijayarenu via cdouglas)
+
+ HADOOP-3884. Fix so that Eclipse plugin builds against recent
+ Eclipse releases. (cutting)
+
+ HADOOP-3837. Streaming jobs report progress status. (dhruba)
+
+ HADOOP-3897. Fix a NPE in secondary namenode. (Lohit Vijayarenu via
+ cdouglas)
+
+ HADOOP-3901. Fix bin/hadoop to correctly set classpath under cygwin.
+ (Tsz Wo (Nicholas) Sze via omalley)
+
+ HADOOP-3947. Fix a problem in tasktracker reinitialization.
+ (Amareshwari Sriramadasu via ddas)
+
+Release 0.17.3 - Unreleased
+
+ IMPROVEMENTS
+
+ HADOOP-4164. Chinese translation of the documentation. (Xuebing Yan via
+ omalley)
+
+ BUG FIXES
+
+ HADOOP-4277. Checksum verification was mistakenly disabled for
+ LocalFileSystem. (Raghu Angadi)
+
+ HADOOP-4271. Checksum input stream can sometimes return invalid
+ data to the user. (Ning Li via rangadi)
+
+ HADOOP-4318. DistCp should use absolute paths for cleanup. (szetszwo)
+
+ HADOOP-4326. ChecksumFileSystem does not override create(...) correctly.
+ (szetszwo)
+
+Release 0.17.2 - 2008-08-11
+
+ BUG FIXES
+
+ HADOOP-3678. Avoid spurious exceptions logged at DataNode when clients
+ read from DFS. (rangadi)
+
+ HADOOP-3707. NameNode keeps a count of number of blocks scheduled
+ to be written to a datanode and uses it to avoid allocating more
+ blocks than a datanode can hold. (rangadi)
+
+ HADOOP-3760. Fix a bug with HDFS file close() mistakenly introduced
+ by HADOOP-3681. (Lohit Vijayarenu via rangadi)
+
+ HADOOP-3681. DFSClient can get into an infinite loop while closing
+ a file if there are some errors. (Lohit Vijayarenu via rangadi)
+
+ HADOOP-3002. Hold off block removal while in safe mode. (shv)
+
+ HADOOP-3685. Unbalanced replication target. (hairong)
+
+ HADOOP-3758. Shutdown datanode on version mismatch instead of retrying
+ continuously, preventing excessive logging at the namenode.
+ (lohit vijayarenu via cdouglas)
+
+ HADOOP-3633. Correct exception handling in DataXceiveServer, and throttle
+ the number of xceiver threads in a data-node. (shv)
+
+ HADOOP-3370. Ensure that the TaskTracker.runningJobs data-structure is
+ correctly cleaned-up on task completion. (Zheng Shao via acmurthy)
+
+ HADOOP-3813. Fix task-output clean-up on HDFS to use the recursive
+ FileSystem.delete rather than the FileUtil.fullyDelete. (Amareshwari
+ Sri Ramadasu via acmurthy)
+
+ HADOOP-3859. Allow the maximum number of xceivers in the data node to
+ be configurable. (Johan Oskarsson via omalley)
+
+ HADOOP-3931. Fix corner case in the map-side sort that causes some values
+ to be counted as too large, causing premature spills to disk. Some values
+ will also bypass the combiner incorrectly. (cdouglas via omalley)
+
+Release 0.17.1 - 2008-06-23
+
+ INCOMPATIBLE CHANGES
+
+ HADOOP-3565. Fix the Java serialization, which is not enabled by
+ default, to clear the state of the serializer between objects.
+ (tomwhite via omalley)
+
+ IMPROVEMENTS
+
+ HADOOP-3522. Improve documentation on reduce pointing out that
+ input keys and values will be reused. (omalley)
+
+ HADOOP-3487. Balancer uses thread pools for managing its threads;
+ it therefore provides better resource management. (hairong)
+
+ BUG FIXES
+
+ HADOOP-2159. Namenode stuck in safemode. The counter blockSafe should
+ not be decremented for invalid blocks. (hairong)
+
+ HADOOP-3472. MapFile.Reader getClosest() function returns incorrect results
+ when before is true. (Todd Lipcon via Stack)
+
+ HADOOP-3442. Limit recursion depth on the stack for QuickSort to prevent
+ StackOverflowErrors. To avoid O(n*n) cases, when partitioning depth exceeds
+ a multiple of log(n), change to HeapSort. (cdouglas)
+
+ HADOOP-3477. Fix build to not package contrib/*/bin twice in
+ distributions. (Adam Heath via cutting)
+
+ HADOOP-3475. Fix MapTask to correctly size the accounting allocation of
+ io.sort.mb. (cdouglas)
+
+ HADOOP-3550. Fix the serialization data structures in MapTask where the
+ value lengths are incorrectly calculated. (cdouglas)
+
+ HADOOP-3526. Fix contrib/data_join framework by cloning values retained
+ in the reduce. (Spyros Blanas via cdouglas)
+
+ HADOOP-1979. Speed up fsck by adding a buffered stream. (Lohit
+ Vijaya Renu via omalley)
+
+Release 0.17.0 - 2008-05-18
+
+ INCOMPATIBLE CHANGES
+
+ HADOOP-2786. Move HBase out of Hadoop core.
+
+ HADOOP-2345. New HDFS transactions to support appending
+ to files. Disk layout version changed from -11 to -12. (dhruba)
+
+ HADOOP-2192. Error messages from "dfs mv" command improved.
+ (Mahadev Konar via dhruba)
+
+ HADOOP-1902. "dfs du" command without any arguments operates on the
+ current working directory. (Mahadev Konar via dhruba)
+
+ HADOOP-2873. Fixed bad disk format introduced by HADOOP-2345.
+ Disk layout version changed from -12 to -13. See changelist 630992
+ (dhruba)
+
+ HADOOP-1985. This addresses rack-awareness for Map tasks and for
+ HDFS in a uniform way. (ddas)
+
+ HADOOP-1986. Add support for a general serialization mechanism for
+ Map Reduce. (tomwhite)
+
+ HADOOP-771. FileSystem.delete() takes an explicit parameter that
+ specifies whether a recursive delete is intended.
+ (Mahadev Konar via dhruba)
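+
+ A minimal sketch of the explicit form; the path is hypothetical:
+
+     import org.apache.hadoop.conf.Configuration;
+     import org.apache.hadoop.fs.FileSystem;
+     import org.apache.hadoop.fs.Path;
+
+     public class DeleteDemo {
+       public static void main(String[] args) throws Exception {
+         FileSystem fs = FileSystem.get(new Configuration());
+         // true: delete a non-empty directory recursively;
+         // false: refuse to delete one.
+         fs.delete(new Path("/tmp/myapp/output"), true);
+       }
+     }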
+
+ HADOOP-2470. Remove getContentLength(String), open(String, long, long)
+ and isDir(String) from ClientProtocol. ClientProtocol version changed
+ from 26 to 27. (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-2822. Remove deprecated code for classes InputFormatBase and
+ PhasedFileSystem. (Amareshwari Sriramadasu via enis)
+
+ HADOOP-2116. Changes the layout of the task execution directory.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-2828. The following deprecated methods in Configuration.java
+ have been removed
+ getObject(String name)
+ setObject(String name, Object value)
+ get(String name, Object defaultValue)
+ set(String name, Object value)
+ Iterator entries()
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-2824. Removes one deprecated constructor from MiniMRCluster.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-2823. Removes deprecated methods getColumn(), getLine() from
+ org.apache.hadoop.record.compiler.generated.SimpleCharStream.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3060. Removes one unused constructor argument from MiniMRCluster.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-2854. Remove deprecated o.a.h.ipc.Server::getUserInfo().
+ (lohit vijayarenu via cdouglas)
+
+ HADOOP-2563. Remove deprecated FileSystem::listPaths.
+ (lohit vijayarenu via cdouglas)
+
+ HADOOP-2818. Remove deprecated methods in Counters.
+ (Amareshwari Sriramadasu via tomwhite)
+
+ HADOOP-2831. Remove deprecated o.a.h.dfs.INode::getAbsoluteName()
+ (lohit vijayarenu via cdouglas)
+
+ HADOOP-2839. Remove deprecated FileSystem::globPaths.
+ (lohit vijayarenu via cdouglas)
+
+ HADOOP-2634. Deprecate ClientProtocol::exists.
+ (lohit vijayarenu via cdouglas)
+
+ HADOOP-2410. Make EC2 cluster nodes more independent of each other.
+ Multiple concurrent EC2 clusters are now supported, and nodes may be
+ added to a cluster on the fly with new nodes starting in the same EC2
+ availability zone as the cluster. Ganglia monitoring and large
+ instance sizes have also been added. (Chris K Wensel via tomwhite)
+
+ HADOOP-2826. Deprecated FileSplit.getFile(), LineRecordReader.readLine().
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3239. getFileInfo() returns null for non-existing files instead
+ of throwing FileNotFoundException. (Lohit Vijayarenu via shv)
+
+ HADOOP-3266. Removed HOD changes from CHANGES.txt, as they are now inside
+ src/contrib/hod (Hemanth Yamijala via ddas)
+
+ HADOOP-3280. Separate the configuration of the virtual memory size
+ (mapred.child.ulimit) from the jvm heap size, so that 64 bit
+ streaming applications are supported even when running with 32 bit
+ jvms. (acmurthy via omalley)
+
+ NEW FEATURES
+
+ HADOOP-1398. Add HBase in-memory block cache. (tomwhite)
+
+ HADOOP-2178. Job History on DFS. (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2063. A new parameter to dfs -get command to fetch a file
+ even if it is corrupted. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2219. A new command "df -count" that counts the number of
+ files and directories. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2906. Add an OutputFormat capable of using keys, values, and
+ config params to map records to different output files.
+ (Runping Qi via cdouglas)
+
+ HADOOP-2346. Utilities to support timeout while writing to sockets.
+ DFSClient and DataNode sockets have 10min write timeout. (rangadi)
+
+ HADOOP-2951. Add a contrib module that provides a utility to
+ build or update Lucene indexes using Map/Reduce. (Ning Li via cutting)
+
+ HADOOP-1622. Allow multiple jar files for map reduce.
+ (Mahadev Konar via dhruba)
+
+ HADOOP-2055. Allows users to set PathFilter on the FileInputFormat.
+ (Alejandro Abdelnur via ddas)
+
+ HADOOP-2551. More environment variables like HADOOP_NAMENODE_OPTS
+ for better control of HADOOP_OPTS for each component. (rangadi)
+
+ HADOOP-3001. Add job counters that measure the number of bytes
+ read and written to HDFS, S3, KFS, and local file systems. (omalley)
+
+ HADOOP-3048. A new Interface and a default implementation to convert
+ and restore serializations of objects to/from strings. (enis)
+
+ IMPROVEMENTS
+
+ HADOOP-2655. Copy on write for data and metadata files in the
+ presence of snapshots. Needed for supporting appends to HDFS
+ files. (dhruba)
+
+ HADOOP-1967. When a Path specifies the same scheme as the default
+ FileSystem but no authority, the default FileSystem's authority is
+ used. Also add warnings for old-format FileSystem names, accessor
+ methods for fs.default.name, and check for null authority in HDFS.
+ (cutting)
+
+ HADOOP-2895. Let the profiling string be configurable.
+ (Martin Traverso via cdouglas)
+
+ HADOOP-910. Enables Reduces to do merges for the on-disk map output files
+ in parallel with their copying. (Amar Kamat via ddas)
+
+ HADOOP-730. Use rename rather than copy for local renames. (cdouglas)
+
+ HADOOP-2810. Updated the Hadoop Core logo. (nigel)
+
+ HADOOP-2057. Streaming should optionally treat a non-zero exit status
+ of a child process as a failed task. (Rick Cox via tomwhite)
+
+ HADOOP-2765. Enables specifying ulimits for streaming/pipes tasks (ddas)
+
+ HADOOP-2888. Make gridmix scripts more readily configurable and amenable
+ to automated execution. (Mukund Madhugiri via cdouglas)
+
+ HADOOP-2908. A document that describes the DFS Shell command.
+ (Mahadev Konar via dhruba)
+
+ HADOOP-2981. Update README.txt to reflect the upcoming use of
+ cryptography. (omalley)
+
+ HADOOP-2804. Add support to publish CHANGES.txt as HTML when running
+ the Ant 'docs' target. (nigel)
+
+ HADOOP-2559. Change DFS block placement to allocate the first replica
+ locally, the second off-rack, and the third intra-rack from the
+ second. (lohit vijayarenu via cdouglas)
+
+ HADOOP-2939. Make the automated patch testing process an executable
+ Ant target, test-patch. (nigel)
+
+ HADOOP-2239. Add HsftpFileSystem to permit transferring files over ssl.
+ (cdouglas)
+
+ HADOOP-2886. Track individual RPC metrics.
+ (girish vaitheeswaran via dhruba)
+
+ HADOOP-2373. Improvement in safe-mode reporting. (shv)
+
+ HADOOP-3091. Modify FsShell command -put to accept multiple sources.
+ (Lohit Vijaya Renu via cdouglas)
+
+ HADOOP-3092. Show counter values from job -status command.
+ (Tom White via ddas)
+
+ HADOOP-1228. Ant task to generate Eclipse project files. (tomwhite)
+
+ HADOOP-3093. Adds Configuration.getStrings(name, default-value) and
+ the corresponding setStrings. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3106. Adds documentation in forrest for debugging.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3099. Add an option to distcp to preserve user, group, and
+ permission information. (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-2841. Unwrap AccessControlException and FileNotFoundException
+ from RemoteException for DFSClient. (shv)
+
+ HADOOP-3152. Make index interval configurable when using
+ MapFileOutputFormat for map-reduce job. (Rong-En Fan via cutting)
+
+ HADOOP-3143. Decrease number of slaves from 4 to 3 in TestMiniMRDFSSort,
+ as Hudson generates false negatives under the current load.
+ (Nigel Daley via cdouglas)
+
+ HADOOP-3174. Illustrative example for MultipleFileInputFormat. (Enis
+ Soztutar via acmurthy)
+
+ HADOOP-2993. Clarify the usage of JAVA_HOME in the Quick Start guide.
+ (acmurthy via nigel)
+
+ HADOOP-3124. Make DataNode socket write timeout configurable. (rangadi)
+
+ OPTIMIZATIONS
+
+ HADOOP-2790. Fixed inefficient method hasSpeculativeTask by removing
+ repetitive calls to get the current time and by checking late whether
+ speculation is enabled at all. (omalley)
+
+ HADOOP-2758. Reduce buffer copies in DataNode when data is read from
+ HDFS, without negatively affecting read throughput. (rangadi)
+
+ HADOOP-2399. Input key and value to combiner and reducer is reused.
+ (Owen O'Malley via ddas)
+
+ HADOOP-2423. Code optimization in FSNamesystem.mkdirs.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2606. ReplicationMonitor selects data-nodes to replicate directly
+ from needed replication blocks instead of looking up for the blocks for
+ each live data-node. (shv)
+
+ HADOOP-2148. Eliminate redundant data-node blockMap lookups. (shv)
+
+ HADOOP-2027. Return the number of bytes in each block in a file
+ via a single rpc to the namenode to speed up job planning.
+ (Lohit Vijaya Renu via omalley)
+
+ HADOOP-2902. Replace uses of "fs.default.name" with calls to the
+ accessor methods added in HADOOP-1967. (cutting)
+
+ HADOOP-2119. Optimize scheduling of jobs with large numbers of
+ tasks by replacing static arrays with lists of runnable tasks.
+ (Amar Kamat via omalley)
+
+ HADOOP-2919. Reduce the number of memory copies done during the
+ map output sorting. Also adds two config variables:
+ io.sort.spill.percent - the percentages of io.sort.mb that should
+ cause a spill (default 80%)
+ io.sort.record.percent - the percent of io.sort.mb that should
+ hold key/value indexes (default 5%)
+ (cdouglas via omalley)
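+
+ A tuning sketch using the defaults quoted above:
+
+     import org.apache.hadoop.mapred.JobConf;
+
+     public class SortTuningDemo {
+       public static void main(String[] args) {
+         JobConf conf = new JobConf();
+         conf.set("io.sort.spill.percent", "0.80");   // spill at 80% full
+         conf.set("io.sort.record.percent", "0.05");  // 5% for record indexes
+       }
+     }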
+
+ HADOOP-3140. Don't add a task to the commit queue if the task hasn't
+ generated any output. (Amar Kamat via ddas)
+
+ HADOOP-3168. Reduce the amount of logging in streaming to an
+ exponentially increasing number of records (up to 10,000
+ records/log). (Zheng Shao via omalley)
+
+ BUG FIXES
+
+ HADOOP-2195. '-mkdir' behaviour is now closer to Linux shell in case of
+ errors. (Mahadev Konar via rangadi)
+
+ HADOOP-2190. Bring the behaviour of '-ls' and '-du' closer to Linux shell
+ commands in case of errors. (Mahadev Konar via rangadi)
+
+ HADOOP-2193. 'fs -rm' and 'fs -rmr' show error message when the target
+ file does not exist. (Mahadev Konar via rangadi)
+
+ HADOOP-2738. Text is not subclassable because set(Text) and compareTo(Object)
+ access the other instance's private members directly. (jimk)
+
+ HADOOP-2779. Remove the references to HBase in the build.xml. (omalley)
+
+ HADOOP-2194. dfs cat on a non-existent file throws FileNotFoundException.
+ (Mahadev Konar via dhruba)
+
+ HADOOP-2767. Fix for NetworkTopology erroneously skipping the last leaf
+ node on a rack. (Hairong Kuang and Mark Butler via dhruba)
+
+ HADOOP-1593. FsShell works with paths in non-default FileSystem.
+ (Mahadev Konar via dhruba)
+
+ HADOOP-2191. du and dus command on non-existent directory gives
+ appropriate error message. (Mahadev Konar via dhruba)
+
+ HADOOP-2832. Remove tabs from code of DFSClient for better
+ indentation. (dhruba)
+
+ HADOOP-2844. distcp closes file handles for sequence files.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2727. Fix links in the Web UI of the hadoop daemons and some docs.
+ (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2871. Fixes a problem to do with file: URI in the JobHistory init.
+ (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2800. Deprecate SetFile.Writer constructor not the whole class.
+ (Johan Oskarsson via tomwhite)
+
+ HADOOP-2891. DFSClient.close() closes all open files. (dhruba)
+
+ HADOOP-2845. Fix dfsadmin disk utilization report on Solaris.
+ (Martin Traverso via tomwhite)
+
+ HADOOP-2912. MiniDFSCluster restart should wait for namenode to exit
+ safemode. This was causing TestFsck to fail. (Mahadev Konar via dhruba)
+
+ HADOOP-2820. The following classes in streaming are removed:
+ StreamLineRecordReader, StreamOutputFormat, StreamSequenceRecordReader.
+ (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2819. The following methods in JobConf are removed:
+ getInputKeyClass(), setInputKeyClass, getInputValueClass(),
+ setInputValueClass(Class theClass), setSpeculativeExecution and
+ getSpeculativeExecution(). (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2817. Removes deprecated mapred.tasktracker.tasks.maximum and
+ ClusterStatus.getMaxTasks(). (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2821. Removes deprecated ShellUtil and ToolBase classes from
+ the util package. (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2934. The namenode was encountering an NPE while loading
+ leases from the fsimage. Fixed. (dhruba)
+
+ HADOOP-2938. Some fs commands did not glob paths.
+ (Tsz Wo (Nicholas), SZE via rangadi)
+
+ HADOOP-2943. Compression of intermediate map output causes failures
+ in the merge. (cdouglas)
+
+ HADOOP-2870. DataNode and NameNode closes all connections while
+ shutting down. (Hairong Kuang via dhruba)
+
+ HADOOP-2973. Fix TestLocalDFS for Windows platform.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2971. select multiple times if it returns early in
+ SocketIOWithTimeout. (rangadi)
+
+ HADOOP-2955. Fix TestCrcCorruption test failures caused by HADOOP-2758
+ (rangadi)
+
+ HADOOP-2657. A flush call on the DFSOutputStream flushes the last
+ partial CRC chunk too. (dhruba)
+
+ HADOOP-2974. IPC unit tests used "0.0.0.0" to connect to server, which
+ is not always supported. (rangadi)
+
+ HADOOP-2996. Fixes uses of StringBuffer in StreamUtils class.
+ (Dave Brosius via ddas)
+
+ HADOOP-2995. Fixes StreamBaseRecordReader's getProgress to return a
+ floating point number. (Dave Brosius via ddas)
+
+ HADOOP-2972. Fix for a NPE in FSDataset.invalidate.
+ (Mahadev Konar via dhruba)
+
+ HADOOP-2994. Code cleanup for DFSClient: remove redundant
+ conversions from string to string. (Dave Brosius via dhruba)
+
+ HADOOP-3009. TestFileCreation sometimes fails because restarting
+ minidfscluster sometimes creates datanodes with ports that are
+ different from their original instance. (dhruba)
+
+ HADOOP-2992. Distributed Upgrade framework works correctly with
+ more than one upgrade object. (Konstantin Shvachko via dhruba)
+
+ HADOOP-2679. Fix a typo in libhdfs. (Jason via dhruba)
+
+ HADOOP-2976. When a lease expires, the Namenode ensures that
+ blocks of the file are adequately replicated. (dhruba)
+
+ HADOOP-2901. Fixes the creation of info servers in the JobClient
+ and JobTracker. Removes the creation from JobClient and removes
+ additional info server from the JobTracker. Also adds the command
+ line utility to view the history files (HADOOP-2896), and fixes
+ bugs in JSPs to do with analysis - HADOOP-2742, HADOOP-2792.
+ (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2890. If different datanodes report the same block but
+ with different sizes to the namenode, the namenode picks the
+ replica(s) with the largest size as the only valid replica(s). (dhruba)
+
+ HADOOP-2825. Deprecated MapOutputLocation.getFile() is removed.
+ (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2806. Fixes a streaming document.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3008. SocketIOWithTimeout throws InterruptedIOException if the
+ thread is interrupted while it is waiting. (rangadi)
+
+ HADOOP-3006. Fix wrong packet size reported by DataNode when a block
+ is being replicated. (rangadi)
+
+ HADOOP-3029. Datanode prints log message "firstbadlink" only if
+ it detects a bad connection to another datanode in the pipeline. (dhruba)
+
+ HADOOP-3030. Release reserved space for file in InMemoryFileSystem if
+ checksum reservation fails. (Devaraj Das via cdouglas)
+
+ HADOOP-3036. Fix findbugs warnings in UpgradeUtilities. (Konstantin
+ Shvachko via cdouglas)
+
+ HADOOP-3025. ChecksumFileSystem supports the delete method with
+ the recursive flag. (Mahadev Konar via dhruba)
+
+ HADOOP-3012. dfs -mv file to user home directory throws exception if
+ the user home directory does not exist. (Mahadev Konar via dhruba)
+
+ HADOOP-3066. Should not require superuser privilege to query if hdfs is in
+ safe mode (jimk)
+
+ HADOOP-3040. If the input line starts with the separator char, the key
+ is set as empty. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3080. Removes flush calls from JobHistory.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3086. Adds the testcase missed during commit of hadoop-3040.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3046. Fix the raw comparators for Text and BytesWritables
+ to use the provided length rather than recompute it. (omalley)
+
+ HADOOP-3094. Fix BytesWritable.toString to avoid extending the sign bit
+ (Owen O'Malley via cdouglas)
+
+ HADOOP-3067. DFSInputStream's position read does not close the sockets.
+ (rangadi)
+
+ HADOOP-3073. close() on SocketInputStream or SocketOutputStream should
+ close the underlying channel. (rangadi)
+
+ HADOOP-3087. Fixes a problem to do with refreshing of loadHistory.jsp.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3065. Better logging message if the rack location of a datanode
+ cannot be determined. (Devaraj Das via dhruba)
+
+ HADOOP-3064. Commas in a file path should not be treated as delimiters.
+ (Hairong Kuang via shv)
+
+ HADOOP-2997. Adds test for non-writable serializer. Also fixes a problem
+ introduced by HADOOP-2399. (Tom White via ddas)
+
+ HADOOP-3114. Fix TestDFSShell on Windows. (Lohit Vijaya Renu via cdouglas)
+
+ HADOOP-3118. Fix Namenode NPE while loading fsimage after a cluster
+ upgrade from older disk format. (dhruba)
+
+ HADOOP-3161. Fix FileUtil.HardLink.getLinkCount on Mac OS. (nigel
+ via omalley)
+
+ HADOOP-2927. Fix TestDU to accurately calculate the expected file size.
+ (shv via nigel)
+
+ HADOOP-3123. Fix the native library build scripts to work on Solaris.
+ (tomwhite via omalley)
+
+ HADOOP-3089. Streaming should accept stderr from task before
+ first key arrives. (Rick Cox via tomwhite)
+
+ HADOOP-3146. A DFSOutputStream.flush method is renamed as
+ DFSOutputStream.fsync. (dhruba)
+
+ HADOOP-3165. -put/-copyFromLocal did not treat input file "-" as stdin.
+ (Lohit Vijayarenu via rangadi)
+
+ HADOOP-3041. Deprecate JobConf.setOutputPath and JobConf.getOutputPath.
+ Deprecate OutputFormatBase. Add FileOutputFormat. Existing output formats
+ extending OutputFormatBase, now extend FileOutputFormat. Add the following
+ APIs in FileOutputFormat: setOutputPath, getOutputPath, getWorkOutputPath.
+ (Amareshwari Sriramadasu via nigel)
+
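+ For instance, a job that previously called JobConf.setOutputPath would now
+ go through FileOutputFormat instead. A minimal sketch (the output path is
+ illustrative):
+
+   import org.apache.hadoop.fs.Path;
+   import org.apache.hadoop.mapred.FileOutputFormat;
+   import org.apache.hadoop.mapred.JobConf;
+
+   public class OutputPathExample {
+     public static void main(String[] args) {
+       JobConf conf = new JobConf();
+       // Replaces the deprecated JobConf.setOutputPath(path):
+       FileOutputFormat.setOutputPath(conf, new Path("/user/alice/out"));
+     }
+   }
+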
+ HADOOP-3083. The fsimage does not store leases. This would have to be
+ reworked in the next release to support appends. (dhruba)
+
+ HADOOP-3166. Fix an ArrayIndexOutOfBoundsException in the spill thread
+ and make exception handling more promiscuous to catch this condition.
+ (cdouglas)
+
+ HADOOP-3050. DataNode sends one and only one block report after
+ it registers with the namenode. (Hairong Kuang)
+
+ HADOOP-3044. NNBench sets the right configuration for the mapper.
+ (Hairong Kuang)
+
+ HADOOP-3178. Fix GridMix scripts for small and medium jobs
+ to handle input paths differently. (Mukund Madhugiri via nigel)
+
+ HADOOP-1911. Fix an infinite loop in DFSClient when all replicas of a
+ block are bad (cdouglas)
+
+ HADOOP-3157. Fix path handling in DistributedCache and TestMiniMRLocalFS.
+ (Doug Cutting via rangadi)
+
+ HADOOP-3018. Fix the eclipse plug-in contrib wrt removed deprecated
+ methods (taton)
+
+ HADOOP-3183. Fix TestJobShell to use 'ls' instead of java.io.File::exists
+ since Cygwin symlinks are unsupported.
+ (Mahadev Konar via cdouglas)
+
+ HADOOP-3175. Fix FsShell.CommandFormat to handle "-" in arguments.
+ (Edward J. Yoon via rangadi)
+
+ HADOOP-3220. Safemode message corrected. (shv)
+
+ HADOOP-3208. Fix WritableDeserializer to set the Configuration on
+ deserialized Writables. (Enis Soztutar via cdouglas)
+
+ HADOOP-3224. 'dfs -du /dir' does not return correct size.
+ (Lohit Vijayarenu via rangadi)
+
+ HADOOP-3223. Fix typo in help message for -chmod. (rangadi)
+
+ HADOOP-1373. checkPath() should ignore case when it compares authoriy.
+ (Edward J. Yoon via rangadi)
+
+ HADOOP-3204. Fixes a problem to do with ReduceTask's LocalFSMerger not
+ catching Throwable. (Amar Ramesh Kamat via ddas)
+
+ HADOOP-3229. Report progress when collecting records from the mapper and
+ the combiner. (Doug Cutting via cdouglas)
+
+ HADOOP-3225. Unwrapping methods of RemoteException should initialize
+ detailedMessage field. (Mahadev Konar, shv, cdouglas)
+
+ HADOOP-3247. Fix gridmix scripts to use the correct globbing syntax and
+ change maxentToSameCluster to run the correct number of jobs.
+ (Runping Qi via cdouglas)
+
+ HADOOP-3242. Fix the RecordReader of SequenceFileAsBinaryInputFormat to
+ correctly read from the start of the split and not the beginning of the
+ file. (cdouglas via acmurthy)
+
+ HADOOP-3256. Encodes the job name used in the filename for history files.
+ (Arun Murthy via ddas)
+
+ HADOOP-3162. Ensure that comma-separated input paths are treated correctly
+ as multiple input paths. (Amareshwari Sri Ramadasu via acmurthy)
+
+ HADOOP-3263. Ensure that the job-history log file always follows the
+ pattern of hostname_timestamp_jobid_username_jobname even if username
+ and/or jobname are not specified. This helps to avoid wrong assumptions
+ made about the job-history log filename in jobhistory.jsp. (acmurthy)
+
+ HADOOP-3251. Fixes getFilesystemName in JobTracker and LocalJobRunner to
+ use FileSystem.getUri instead of FileSystem.getName. (Arun Murthy via ddas)
+
+ HADOOP-3237. Fixes TestDFSShell.testErrOutPut on Windows platform.
+ (Mahadev Konar via ddas)
+
+ HADOOP-3279. TaskTracker checks for SUCCEEDED task status in addition to
+ COMMIT_PENDING status when it fails maps due to lost map.
+ (Devaraj Das)
+
+ HADOOP-3286. Prevent collisions in gridmix output dirs by increasing the
+ granularity of the timestamp. (Runping Qi via cdouglas)
+
+ HADOOP-3285. Fix input split locality when the splits align to
+ fs blocks. (omalley)
+
+ HADOOP-3372. Fix heap management in streaming tests. (Arun Murthy via
+ cdouglas)
+
+ HADOOP-3031. Fix javac warnings in test classes. (cdouglas)
+
+ HADOOP-3382. Fix memory leak when files are not cleanly closed (rangadi)
+
+ HADOOP-3322. Fix to push MetricsRecord for rpc metrics. (Eric Yang via
+ mukund)
+
+Release 0.16.4 - 2008-05-05
+
+ BUG FIXES
+
+ HADOOP-3138. DFS mkdirs() should not throw an exception if the directory
+ already exists. (rangadi via mukund)
+
+ HADOOP-3294. Fix distcp to check the destination length and retry the copy
+ if it doesn't match the src length. (Tsz Wo (Nicholas), SZE via mukund)
+
+ HADOOP-3186. Fix incorrect permission checking for mv and renameTo
+ in HDFS. (Tsz Wo (Nicholas), SZE via mukund)
+
+Release 0.16.3 - 2008-04-16
+
+ BUG FIXES
+
+ HADOOP-3010. Fix ConcurrentModificationException in ipc.Server.Responder.
+ (rangadi)
+
+ HADOOP-3154. Catch all Throwables from the SpillThread in MapTask, rather
+ than IOExceptions only. (ddas via cdouglas)
+
+ HADOOP-3159. Avoid file system cache being overwritten whenever
+ configuration is modified. (Tsz Wo (Nicholas), SZE via hairong)
+
+ HADOOP-3139. Remove the consistency check for the FileSystem cache in
+ closeAll() that causes spurious warnings and a deadlock.
+ (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-3195. Fix TestFileSystem to be deterministic.
+ (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-3069. Primary name-node should not truncate image when transferring
+ it from the secondary. (shv)
+
+ HADOOP-3182. Change permissions of the job-submission directory to 777
+ from 733 to ensure sharing of HOD clusters works correctly. (Tsz Wo
+ (Nicholas), Sze and Amareshwari Sri Ramadasu via acmurthy)
+
+Release 0.16.2 - 2008-04-02
+
+ BUG FIXES
+
+ HADOOP-3011. Prohibit distcp from overwriting directories on the
+ destination filesystem with files. (cdouglas)
+
+ HADOOP-3033. The BlockReceiver thread in the datanode writes data to
+ the block file, changes file position (if needed) and flushes all by
+ itself. The PacketResponder thread does not flush block file. (dhruba)
+
+ HADOOP-2978. Fixes the JobHistory log format for counters.
+ (Runping Qi via ddas)
+
+ HADOOP-2985. Fixes LocalJobRunner to tolerate null job output path.
+ Also makes the _temporary a constant in MRConstants.java.
+ (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3003. FileSystem cache key is updated after a
+ FileSystem object is created. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-3042. Updates the Javadoc in JobConf.getOutputPath to reflect
+ the actual temporary path. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3007. Tolerate mirror failures while DataNode is replicating
+ blocks as it used to before. (rangadi)
+
+ HADOOP-2944. Fixes a "Run on Hadoop" wizard NPE when creating a
+ Location from the wizard. (taton)
+
+ HADOOP-3049. Fixes a problem in MultiThreadedMapRunner to do with
+ catching RuntimeExceptions. (Alejandro Abdelnur via ddas)
+
+ HADOOP-3039. Fixes a problem to do with exceptions in tasks not
+ killing jobs. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3027. Fixes a problem to do with adding a shutdown hook in
+ FileSystem. (Amareshwari Sriramadasu via ddas)
+
+ HADOOP-3056. Fix distcp when the target is an empty directory by
+ making sure the directory is created first. (cdouglas and acmurthy
+ via omalley)
+
+ HADOOP-3070. Protect the trash emptier thread from null pointer
+ exceptions. (Koji Noguchi via omalley)
+
+ HADOOP-3084. Fix HftpFileSystem to work for zero-length files.
+ (cdouglas)
+
+ HADOOP-3107. Fix NPE when fsck invokes getListings. (dhruba)
+
+ HADOOP-3104. Limit MultithreadedMapRunner to have a fixed length queue
+ between the RecordReader and the map threads. (Alejandro Abdelnur via
+ omalley)
+
+ HADOOP-2833. Do not use "Dr. Who" as the default user in JobClient.
+ A valid user name is required. (Tsz Wo (Nicholas), SZE via rangadi)
+
+ HADOOP-3128. Throw RemoteException in setPermissions and setOwner of
+ DistributedFileSystem. (shv via nigel)
+
+Release 0.16.1 - 2008-03-13
+
+ INCOMPATIBLE CHANGES
+
+ HADOOP-2869. Deprecate SequenceFile.setCompressionType in favor of
+ SequenceFile.createWriter, SequenceFileOutputFormat.setCompressionType,
+ and JobConf.setMapOutputCompressionType. (Arun C Murthy via cdouglas)
+ Configuration changes to hadoop-default.xml:
+ deprecated io.seqfile.compression.type
+
+ IMPROVEMENTS
+
+ HADOOP-2371. User guide for file permissions in HDFS.
+ (Robert Chansler via rangadi)
+
+ HADOOP-3098. Allow more characters in user and group names while
+ using -chown and -chgrp commands. (rangadi)
+
+ BUG FIXES
+
+ HADOOP-2789. Race condition in IPC Server Responder that could close
+ connections early. (Raghu Angadi)
+
+ HADOOP-2785. minor. Fix a typo in Datanode block verification
+ (Raghu Angadi)
+
+ HADOOP-2788. minor. Fix help message for chgrp shell command (Raghu Angadi).
+
+ HADOOP-1188. fstime file is updated when a storage directory containing
+ namespace image becomes inaccessible. (shv)
+
+ HADOOP-2787. An application can set a configuration variable named
+ dfs.umask to set the umask that is used by DFS.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2780. The default socket buffer size for DataNodes is 128K.
+ (dhruba)
+
+ HADOOP-2716. Superuser privileges for the Balancer.
+ (Tsz Wo (Nicholas), SZE via shv)
+
+ HADOOP-2754. Filter out .crc files from local file system listing.
+ (Hairong Kuang via shv)
+
+ HADOOP-2733. Fix compiler warnings in test code.
+ (Tsz Wo (Nicholas), SZE via cdouglas)
+
+ HADOOP-2725. Modify distcp to avoid leaving partially copied files at
+ the destination after encountering an error. (Tsz Wo (Nicholas), SZE
+ via cdouglas)
+
+ HADOOP-2391. Cleanup job output directory before declaring a job as
+ SUCCESSFUL. (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2808. Minor fix to FileUtil::copy to mind the overwrite
+ formal. (cdouglas)
+
+ HADOOP-2683. Moving UGI out of the RPC Server.
+ (Tsz Wo (Nicholas), SZE via shv)
+
+ HADOOP-2814. Fix for NPE in datanode in unit test TestDataTransferProtocol.
+ (Raghu Angadi via dhruba)
+
+ HADOOP-2811. Dump of counters in job history does not add comma between
+ groups. (runping via omalley)
+
+ HADOOP-2735. Enables setting TMPDIR for tasks.
+ (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2843. Fix protections on map-side join classes to enable derivation.
+ (cdouglas via omalley)
+
+ HADOOP-2840. Fix gridmix scripts to correctly invoke the java sort through
+ the proper jar. (Mukund Madhugiri via cdouglas)
+
+ HADOOP-2769. TestNNThroughputBenchmark should not use a fixed port for
+ the namenode http port. (omalley)
+
+ HADOOP-2852. Update gridmix benchmark to avoid an artificially long tail.
+ (cdouglas)
+
+ HADOOP-2894. Fix a problem to do with tasktrackers failing to connect to
+ JobTracker upon reinitialization. (Owen O'Malley via ddas).
+
+ HADOOP-2903. Fix exception generated by Metrics while using pushMetric().
+ (girish vaitheeswaran via dhruba)
+
+ HADOOP-2904. Fix to RPC metrics to log the correct host name.
+ (girish vaitheeswaran via dhruba)
+
+ HADOOP-2918. Improve error logging so that dfs writes failure with
+ "No lease on file" can be diagnosed. (dhruba)
+
+ HADOOP-2923. Add SequenceFileAsBinaryInputFormat, which was
+ missed in the commit for HADOOP-2603. (cdouglas via omalley)
+
+ HADOOP-2931. IOException thrown by DFSOutputStream had wrong stack
+ trace in some cases. (Michael Bieniosek via rangadi)
+
+ HADOOP-2883. Write failures and data corruptions on HDFS files.
+ The write timeout is back to what it was on 0.15 release. Also, the
+ datanode flushes the block file's buffered output stream before
+ sending a positive ack for the packet back to the client. (dhruba)
+
+ HADOOP-2756. NPE in DFSClient while closing DFSOutputStreams
+ under load. (rangadi)
+
+ HADOOP-2958. Fixed FileBench, which broke due to HADOOP-2391's check for
+ the existence of the output directory, and a trivial bug in
+ GenericMRLoadGenerator where min/max word lengths were identical since
+ they were looking at the same config variable. (Chris Douglas via
+ acmurthy)
+
+ HADOOP-2915. Fixed FileSystem.CACHE so that a username is included
+ in the cache key. (Tsz Wo (Nicholas), SZE via nigel)
+
+ HADOOP-2813. TestDU unit test uses its own directory to run its
+ sequence of tests. (Mahadev Konar via dhruba)
+
+Release 0.16.0 - 2008-02-07
+
+ INCOMPATIBLE CHANGES
+
+ HADOOP-1245. Use the mapred.tasktracker.tasks.maximum value
+ configured on each tasktracker when allocating tasks, instead of
+ the value configured on the jobtracker. InterTrackerProtocol
+ version changed from 5 to 6. (Michael Bieniosek via omalley)
+
+ HADOOP-1843. Removed code from Configuration and JobConf deprecated by
+ HADOOP-785 and a minor fix to Configuration.toString. Specifically the
+ important change is that mapred-default.xml is no longer supported and
+ Configuration no longer supports the notion of default/final resources.
+ (acmurthy)
+
+ HADOOP-1302. Remove deprecated abacus code from the contrib directory.
+ This also fixes a configuration bug in AggregateWordCount, so that the
+ job now works. (enis)
+
+ HADOOP-2288. Enhance FileSystem API to support access control.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2184. RPC Support for user permissions and authentication.
+ (Raghu Angadi via dhruba)
+
+ HADOOP-2185. RPC Server uses any available port if the specified
+ port is zero. Otherwise it uses the specified port. Also combines
+ the configuration attributes for the servers' bind address and
+ port from "x.x.x.x" and "y" to "x.x.x.x:y".
+ Deprecated configuration variables:
+ dfs.info.bindAddress
+ dfs.info.port
+ dfs.datanode.bindAddress
+ dfs.datanode.port
+ dfs.datanode.info.bindAdress
+ dfs.datanode.info.port
+ dfs.secondary.info.bindAddress
+ dfs.secondary.info.port
+ mapred.job.tracker.info.bindAddress
+ mapred.job.tracker.info.port
+ mapred.task.tracker.report.bindAddress
+ tasktracker.http.bindAddress
+ tasktracker.http.port
+ New configuration variables (post HADOOP-2404):
+ dfs.secondary.http.address
+ dfs.datanode.address
+ dfs.datanode.http.address
+ dfs.http.address
+ mapred.job.tracker.http.address
+ mapred.task.tracker.report.address
+ mapred.task.tracker.http.address
+ (Konstantin Shvachko via dhruba)
+
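+ Under the combined scheme a single value carries both the host and the
+ port. A sketch (the address shown is illustrative):
+
+   import org.apache.hadoop.conf.Configuration;
+
+   public class BindAddressExample {
+     public static void main(String[] args) {
+       Configuration conf = new Configuration();
+       // Replaces the separate dfs.datanode.bindAddress and dfs.datanode.port:
+       conf.set("dfs.datanode.address", "0.0.0.0:50010");
+     }
+   }
+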
+ HADOOP-2401. Only the current leaseholder can abandon a block for
+ a HDFS file. ClientProtocol version changed from 20 to 21.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2381. Support permission information in FileStatus. Client
+ Protocol version changed from 21 to 22. (Raghu Angadi via dhruba)
+
+ HADOOP-2110. Block report processing creates fewer transient objects.
+ Datanode Protocol version changed from 10 to 11.
+ (Sanjay Radia via dhruba)
+
+ HADOOP-2567. Add FileSystem#getHomeDirectory(), which returns the
+ user's home directory in a FileSystem as a fully-qualified path.
+ FileSystem#getWorkingDirectory() is also changed to return a
+ fully-qualified path, which can break applications that attempt
+ to, e.g., pass LocalFileSystem#getWorkingDir().toString() directly
+ to java.io methods that accept file names. (cutting)
+
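+ A minimal sketch of the new call; the printed value is a fully-qualified
+ path such as hdfs://<namenode>/user/<name>:
+
+   import org.apache.hadoop.conf.Configuration;
+   import org.apache.hadoop.fs.FileSystem;
+   import org.apache.hadoop.fs.Path;
+
+   public class HomeDirExample {
+     public static void main(String[] args) throws Exception {
+       FileSystem fs = FileSystem.get(new Configuration());
+       Path home = fs.getHomeDirectory();  // fully qualified in this FileSystem
+       System.out.println(home);
+     }
+   }
+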
+ HADOOP-2514. Change trash feature to maintain a per-user trash
+ directory, named ".Trash" in the user's home directory. The
+ "fs.trash.root" parameter is no longer used. Full source paths
+ are also no longer reproduced within the trash.
+
+ HADOOP-2012. Periodic data verification on Datanodes.
+ (Raghu Angadi via dhruba)
+
+ HADOOP-1707. The DFSClient does not use a local disk file to cache
+ writes to a HDFS file. Changed Data Transfer Version from 7 to 8.
+ (dhruba)
+
+ HADOOP-2652. Fix permission issues for HftpFileSystem. This is an
+ incompatible change since distcp may not be able to copy files
+ from cluster A (compiled with this patch) to cluster B (compiled
+ with previous versions). (Tsz Wo (Nicholas), SZE via dhruba)
+
+ NEW FEATURES
+
+ HADOOP-1857. Ability to run a script when a task fails to capture stack
+ traces. (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2299. Definition of a login interface. A simple implementation for
+ Unix users and groups. (Hairong Kuang via dhruba)
+
+ HADOOP-1652. A utility to balance data among datanodes in a HDFS cluster.
+ (Hairong Kuang via dhruba)
+
+ HADOOP-2085. A library to support map-side joins of consistently
+ partitioned and sorted data sets. (Chris Douglas via omalley)
+
+ HADOOP-2336. Shell commands to modify file permissions. (rangadi)
+
+ HADOOP-1298. Implement file permissions for HDFS.
+ (Tsz Wo (Nicholas) & taton via cutting)
+
+ HADOOP-2447. HDFS can be configured to limit the total number of
+ objects (inodes and blocks) in the file system. (dhruba)
+
+ HADOOP-2487. Added an option to get statuses for all submitted/run jobs.
+ This information can be used to develop tools for analysing jobs.
+ (Amareshwari Sri Ramadasu via acmurthy)
+
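+ A sketch of reading these statuses, assuming they are exposed through the
+ JobClient.getAllJobs call:
+
+   import org.apache.hadoop.mapred.JobClient;
+   import org.apache.hadoop.mapred.JobConf;
+   import org.apache.hadoop.mapred.JobStatus;
+
+   public class JobStatusExample {
+     public static void main(String[] args) throws Exception {
+       JobClient client = new JobClient(new JobConf());
+       for (JobStatus status : client.getAllJobs()) {  // submitted and completed jobs
+         System.out.println(status.getJobId());
+       }
+     }
+   }
+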
+ HADOOP-1873. Implement user permissions for Map/Reduce framework.
+ (Hairong Kuang via shv)
+
+ HADOOP-2532. Add to MapFile a getClosest method that returns the key
+ that comes just before if the key is not present. (stack via tomwhite)
+
+ HADOOP-1883. Add versioning to Record I/O. (Vivek Ratan via ddas)
+
+ HADOOP-2603. Add SequenceFileAsBinaryInputFormat, which reads
+ sequence files as BytesWritable/BytesWritable regardless of the
+ key and value types used to write the file. (cdouglas via omalley)
+
+ HADOOP-2367. Add ability to profile a subset of map/reduce tasks and fetch
+ the result to the local filesystem of the submitting application. Also
+ includes a general IntegerRanges extension to Configuration for setting
+ positive, ranged parameters. (Owen O'Malley via cdouglas)
+
+ IMPROVEMENTS
+
+ HADOOP-2045. Change committer list on website to a table, so that
+ folks can list their organization, timezone, etc. (cutting)
+
+ HADOOP-2058. Facilitate creating new datanodes dynamically in
+ MiniDFSCluster. (Hairong Kuang via dhruba)
+
+ HADOOP-1855. fsck verifies block placement policies and reports
+ violations. (Konstantin Shvachko via dhruba)
+
+ HADOOP-1604. A system administrator can finalize namenode upgrades
+ without running the cluster. (Konstantin Shvachko via dhruba)
+
+ HADOOP-1839. Link-ify the Pending/Running/Complete/Killed grid in
+ jobdetails.jsp to help quickly narrow down and see categorized TIPs'
+ details via jobtasks.jsp. (Amar Kamat via acmurthy)
+
+ HADOOP-1210. Log counters in job history. (Owen O'Malley via ddas)
+
+ HADOOP-1912. Datanode has two new commands COPY and REPLACE. These are
+ needed for supporting data rebalance. (Hairong Kuang via dhruba)
+
+ HADOOP-2086. This patch adds the ability to add dependencies to a job
+ (run via JobControl) after construction. (Adrian Woodhead via ddas)
+
+ HADOOP-1185. Support changing the logging level of a server without
+ restarting the server. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2134. Remove developer-centric requirements from overview.html and
+ keep it end-user focussed, specifically sections related to subversion and
+ building Hadoop. (Jim Kellerman via acmurthy)
+
+ HADOOP-1989. Support simulated DataNodes. This helps creating large virtual
+ clusters for testing purposes. (Sanjay Radia via dhruba)
+
+ HADOOP-1274. Support different number of mappers and reducers per
+ TaskTracker to allow administrators to better configure and utilize
+ heterogeneous clusters.
+ Configuration changes to hadoop-default.xml:
+ add mapred.tasktracker.map.tasks.maximum (default value of 2)
+ add mapred.tasktracker.reduce.tasks.maximum (default value of 2)
+ remove mapred.tasktracker.tasks.maximum (deprecated for 0.16.0)
+ (Amareshwari Sri Ramadasu via acmurthy)
+
+ HADOOP-2104. Adds a description to the ant targets. This makes the
+ output of "ant -projecthelp" sensible. (Chris Douglas via ddas)
+
+ HADOOP-2127. Added a pipes sort example to benchmark trivial pipes
+ application versus trivial java application. (omalley via acmurthy)
+
+ HADOOP-2113. A new shell command "dfs -text" to view the contents of
+ a gzipped file or a SequenceFile. (Chris Douglas via dhruba)
+
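+ Typical usage (the paths are illustrative):
+
+   bin/hadoop dfs -text /logs/events.gz
+   bin/hadoop dfs -text /data/part-00000
+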
+ HADOOP-2207. Add a "package" target for contrib modules that
+ permits each to determine what files are copied into release
+ builds. (stack via cutting)
+
+ HADOOP-1984. Makes the backoff for failed fetches exponential.
+ Earlier, it was a random backoff from an interval.
+ (Amar Kamat via ddas)
+
+ HADOOP-1327. Include website documentation for streaming. (Rob Weltman
+ via omalley)
+
+ HADOOP-2000. Rewrite NNBench to measure namenode performance accurately.
+ It now uses the map-reduce framework for load generation.
+ (Mukund Madhugiri via dhruba)
+
+ HADOOP-2248. Speeds up the framework w.r.t Counters. Also has API
+ updates to the Counters part. (Owen O'Malley via ddas)
+
+ HADOOP-2326. The initial block report at Datanode startup time has
+ a random backoff period. (Sanjay Radia via dhruba)
+
+ HADOOP-2432. HDFS includes the name of the file while throwing
+ "File does not exist" exception. (Jim Kellerman via dhruba)
+
+ HADOOP-2457. Added a 'forrest.home' property to the 'docs' target in
+ build.xml. (acmurthy)
+
+ HADOOP-2149. A new benchmark for three name-node operations: file create,
+ open, and block report, to evaluate the name-node performance
+ for optimizations or new features. (Konstantin Shvachko via shv)
+
+ HADOOP-2466. Change FileInputFormat.computeSplitSize to a protected
+ non-static method to allow sub-classes to provide alternate
+ implementations. (Alejandro Abdelnur via acmurthy)
+
+ HADOOP-2425. Change TextOutputFormat to handle Text specifically for better
+ performance. Make NullWritable implement Comparable. Make TextOutputFormat
+ treat NullWritable like null. (omalley)
+
+ HADOOP-1719. Improves the utilization of shuffle copier threads.
+ (Amar Kamat via ddas)
+
+ HADOOP-2390. Added documentation for user-controls for intermediate
+ map-outputs & final job-outputs and native-hadoop libraries. (acmurthy)
+
+ HADOOP-1660. Add the cwd of the map/reduce task to the java.library.path
+ of the child-jvm to support loading of native libraries distributed via
+ the DistributedCache. (acmurthy)
+
+ HADOOP-2285. Speeds up TextInputFormat. Also includes updates to the
+ Text API. (Owen O'Malley via cdouglas)
+
+ HADOOP-2233. Adds a generic load generator for modeling MR jobs. (cdouglas)
+
+ HADOOP-2369. Adds a set of scripts for simulating a mix of user map/reduce
+ workloads. (Runping Qi via cdouglas)
+
+ HADOOP-2547. Removes use of a 'magic number' in build.xml.
+ (Hrishikesh via nigel)
+
+ HADOOP-2268. Fix org.apache.hadoop.mapred.jobcontrol classes to use the
+ List/Map interfaces rather than concrete ArrayList/HashMap classes
+ internally. (Adrian Woodhead via acmurthy)
+
+ HADOOP-2406. Add a benchmark for measuring read/write performance through
+ the InputFormat interface, particularly with compression. (cdouglas)
+
+ HADOOP-2131. Allow finer-grained control over speculative-execution. Now
+ users can set it for maps and reduces independently.
+ Configuration changes to hadoop-default.xml:
+ deprecated mapred.speculative.execution
+ add mapred.map.tasks.speculative.execution
+ add mapred.reduce.tasks.speculative.execution
+ (Amareshwari Sri Ramadasu via acmurthy)
+
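+ For example, a job that wants speculation for maps but not for reduces
+ could set (a sketch using the new property names):
+
+   import org.apache.hadoop.mapred.JobConf;
+
+   public class SpeculationExample {
+     public static void main(String[] args) {
+       JobConf conf = new JobConf();
+       conf.setBoolean("mapred.map.tasks.speculative.execution", true);
+       conf.setBoolean("mapred.reduce.tasks.speculative.execution", false);
+     }
+   }
+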
+ HADOOP-1965. Interleave sort/spill in the map-task along with calls to the
+ Mapper.map method. This is done by splitting the 'io.sort.mb' buffer into
+ two and using one half for collecting map-outputs and the other half for
+ sort/spill. (Amar Kamat via acmurthy)
+
+ HADOOP-2464. Unit tests for chmod, chown, and chgrp using DFS.
+ (Raghu Angadi)
+
+ HADOOP-1876. Persist statuses of completed jobs in HDFS so that the
+ JobClient can query and get information about decommissioned jobs and also
+ across JobTracker restarts.
+ Configuration changes to hadoop-default.xml:
+ add mapred.job.tracker.persist.jobstatus.active (default value of false)
+ add mapred.job.tracker.persist.jobstatus.hours (default value of 0)
+ add mapred.job.tracker.persist.jobstatus.dir (default value of
+ /jobtracker/jobsInfo)
+ (Alejandro Abdelnur via acmurthy)
+
+ HADOOP-2077. Added version and build information to STARTUP_MSG for all
+ hadoop daemons to aid error-reporting, debugging etc. (acmurthy)
+
+ HADOOP-2398. Additional instrumentation for NameNode and RPC server.
+ Add support for accessing instrumentation statistics via JMX.
+ (Sanjay Radia via dhruba)
+
+ HADOOP-2449. A return of the non-MR version of NNBench.
+ (Sanjay Radia via shv)
+
+ HADOOP-1989. Remove 'datanodecluster' command from bin/hadoop.
+ (Sanjay Radia via shv)
+
+ HADOOP-1742. Improve JavaDoc documentation for ClientProtocol, DFSClient,
+ and FSNamesystem. (Konstantin Shvachko)
+
+ HADOOP-2298. Add Ant target for a binary-only distribution.
+ (Hrishikesh via nigel)
+
+ HADOOP-2509. Add Ant target for Rat report (Apache license header
+ reports). (Hrishikesh via nigel)
+
+ HADOOP-2469. WritableUtils.clone should take a Configuration
+ instead of a JobConf. (stack via omalley)
+
+ HADOOP-2659. Introduce superuser permissions for admin operations.
+ (Tsz Wo (Nicholas), SZE via shv)
+
+ HADOOP-2596. Added a SequenceFile.createWriter api which allows the user
+ to specify the blocksize, replication factor and the buffersize to be
+ used for the underlying HDFS file. (Alejandro Abdelnur via acmurthy)
+
+ HADOOP-2431. Test HDFS File Permissions. (Hairong Kuang via shv)
+
+ HADOOP-2232. Add an option to disable Nagle's algorithm in the IPC stack.
+ (Clint Morgan via cdouglas)
+
+ HADOOP-2342. Created a micro-benchmark for measuring
+ local-file versus hdfs reads. (Owen O'Malley via nigel)
+
+ HADOOP-2529. First version of HDFS User Guide. (Raghu Angadi)
+
+ HADOOP-2690. Add jar-test target to build.xml, separating compilation
+ and packaging of the test classes. (Enis Soztutar via cdouglas)
+
+ OPTIMIZATIONS
+
+ HADOOP-1898. Release the lock protecting the timestamp of the last stack
+ dump while the dump is happening. (Amareshwari Sri Ramadasu via omalley)
+
+ HADOOP-1900. Makes the heartbeat and task-event query intervals
+ dependent on the cluster size. (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2208. Counter update frequency (from TaskTracker to JobTracker) is
+ capped at 1 minute. (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2284. Reduce the number of progress updates during the sorting in
+ the map task. (Amar Kamat via ddas)
+
+ BUG FIXES
+
+ HADOOP-2583. Fixes a bug in the Eclipse plug-in UI to edit locations.
+ Plug-in version is now synchronized with Hadoop version.
+
+ HADOOP-2100. Remove faulty check for existence of $HADOOP_PID_DIR and let
+ 'mkdir -p' check & create it. (Michael Bieniosek via acmurthy)
+
+ HADOOP-1642. Ensure jobids generated by LocalJobRunner are unique to
+ avoid collisions and hence job-failures. (Doug Cutting via acmurthy)
+
+ HADOOP-2096. Close open file-descriptors held by streams while localizing
+ job.xml in the JobTracker and while displaying it on the webui in
+ jobconf.jsp. (Amar Kamat via acmurthy)
+
+ HADOOP-2098. Log start & completion of empty jobs to JobHistory, which
+ also ensures that we close the file-descriptor of the job's history log
+ opened during job-submission. (Amar Kamat via acmurthy)
+
+ HADOOP-2112. Adding back changes to build.xml lost while reverting
+ HADOOP-1622 i.e. http://svn.apache.org/viewvc?view=rev&revision=588771.
+ (acmurthy)
+
+ HADOOP-2089. Fixes the command line argument handling to handle multiple
+ -cacheArchive in Hadoop streaming. (Lohit Vijayarenu via ddas)
+
+ HADOOP-2071. Fix StreamXmlRecordReader to use a BufferedInputStream
+ wrapped over the DFSInputStream since mark/reset aren't supported by
+ DFSInputStream anymore. (Lohit Vijayarenu via acmurthy)
+
+ HADOOP-1348. Allow XML comments inside configuration files.
+ (Rajagopal Natarajan and Enis Soztutar via enis)
+
+ HADOOP-1952. Improve handling of invalid, user-specified classes while
+ configuring streaming jobs such as combiner, input/output formats etc.
+ Now invalid options are caught, logged and jobs are failed early. (Lohit
+ Vijayarenu via acmurthy)
+
+ HADOOP-2151. FileSystem.globPaths validates the list of Paths that
+ it returns. (Lohit Vijayarenu via dhruba)
+
+ HADOOP-2121. Cleanup DFSOutputStream when the stream encountered errors
+ when Datanodes became full. (Raghu Angadi via dhruba)
+
+ HADOOP-1130. The FileSystem.closeAll() method closes all existing
+ DFSClients. (Chris Douglas via dhruba)
+
+ HADOOP-2204. DFSTestUtil.waitReplication was not waiting for all replicas
+ to get created, thus causing unit test failure.
+ (Raghu Angadi via dhruba)
+
+ HADOOP-2078. A zero-size file may have no blocks associated with it.
+ (Konstantin Shvachko via dhruba)
+
+ HADOOP-2212. ChecksumFileSystem.getSumBufferSize might throw
+ java.lang.ArithmeticException. The fix is to initialize bytesPerChecksum
+ to 0. (Michael Bieniosek via ddas)
+
+ HADOOP-2216. Fix jobtasks.jsp to ensure that it first collects the
+ taskids which satisfy the filtering criteria and then use that list to
+ print out only the required task-reports, previously it was oblivious to
+ the filtering and hence used the wrong index into the array of task-reports.
+ (Amar Kamat via acmurthy)
+
+ HADOOP-2272. Fix findbugs target to reflect changes made to the location
+ of the streaming jar file by HADOOP-2207. (Adrian Woodhead via nigel)
+
+ HADOOP-2244. Fixes the MapWritable.readFields to clear the instance
+ field variable every time readFields is called. (Michael Stack via ddas).
+
+ HADOOP-2245. Fixes LocalJobRunner to include a jobId in the mapId. Also,
+ adds a testcase for JobControl. (Adrian Woodhead via ddas).
+
+ HADOOP-2275. Fix erroneous detection of corrupted file when namenode
+ fails to allocate any datanodes for newly allocated block.
+ (Dhruba Borthakur via dhruba)
+
+ HADOOP-2256. Fix a bug in the namenode that could cause it to encounter
+ an infinite loop while deleting excess replicas that were created by
+ block rebalancing. (Hairong Kuang via dhruba)
+
+ HADOOP-2209. SecondaryNamenode process exits if it encounters exceptions
+ that it cannot handle. (Dhruba Borthakur via dhruba)
+
+ HADOOP-2314. Prevent TestBlockReplacement from occasionally getting
+ into an infinite loop. (Hairong Kuang via dhruba)
+
+ HADOOP-2300. This fixes a bug where mapred.tasktracker.tasks.maximum
+ would be ignored even if it was set in hadoop-site.xml.
+ (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2349. Improve code layout in file system transaction logging code.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2368. Fix unit tests on Windows.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2363. This fix allows running multiple instances of the unit test
+ in parallel. The bug was introduced in HADOOP-2185 that changed
+ port-rolling behaviour. (Konstantin Shvachko via dhruba)
+
+ HADOOP-2271. Fix chmod task to be non-parallel. (Adrian Woodhead via
+ omalley)
+
+ HADOOP-2313. Fail the build if building libhdfs fails. (nigel via omalley)
+
+ HADOOP-2359. Remove warning for interrupted exception when closing down
+ minidfs. (dhruba via omalley)
+
+ HADOOP-1841. Prevent slow clients from consuming threads in the NameNode.
+ (dhruba)
+
+ HADOOP-2323. JobTracker.close() should not print stack traces for
+ normal exit. (jimk via cutting)
+
+ HADOOP-2376. Prevents sort example from overriding the number of maps.
+ (Owen O'Malley via ddas)
+
+ HADOOP-2434. FSDatasetInterface read interface causes HDFS reads to occur
+ in 1 byte chunks, causing performance degradation.
+ (Raghu Angadi via dhruba)
+
+ HADOOP-2459. Fix package target so that src/docs/build files are not
+ included in the release. (nigel)
+
+ HADOOP-2215. Fix documentation in cluster_setup.html &
+ mapred_tutorial.html to reflect that mapred.tasktracker.tasks.maximum has
+ been superseded by mapred.tasktracker.{map|reduce}.tasks.maximum.
+ (Amareshwari Sri Ramadasu via acmurthy)
+
+ HADOOP-2352. Remove AC_CHECK_LIB for libz and liblzo to ensure that
+ libhadoop.so doesn't have a dependency on them. (acmurthy)
+
+ HADOOP-2453. Fix the configuration for wordcount-simple example in Hadoop
+ Pipes which currently produces an XML parsing error. (Amareshwari Sri
+ Ramadasu via acmurthy)
+
+ HADOOP-2476. Fixed a unit test failure while reading permission bits of
+ the local file system on Windows. (Raghu Angadi via dhruba)
+
+ HADOOP-2247. Fine-tune the strategies for killing mappers and reducers
+ due to failures while fetching map-outputs. Now the map-completion times
+ and number of currently running reduces are taken into account by the
+ JobTracker before killing the mappers, while the progress made by the
+ reducer and the number of fetch-failures vis-a-vis total number of
+ fetch-attempts are taken into account before the reducer kills itself.
+ (Amar Kamat via acmurthy)
+
+ HADOOP-2452. Fix eclipse plug-in build.xml to refer to the right
+ location where hadoop-*-core.jar is generated. (taton)
+
+ HADOOP-2492. Additional debugging in the rpc server to better
+ diagnose ConcurrentModificationException. (dhruba)
+
+ HADOOP-2344. Enhance the utility for executing shell commands to read the
+ stdout/stderr streams while waiting for the command to finish (to free up
+ the buffers). Also, this patch throws away stderr of the DF utility.
+ @deprecated
+ org.apache.hadoop.fs.ShellCommand for org.apache.hadoop.util.Shell
+ org.apache.hadoop.util.ShellUtil for
+ org.apache.hadoop.util.Shell.ShellCommandExecutor
+ (Amar Kamat via acmurthy)
+
+ HADOOP-2511. Fix a javadoc warning in org.apache.hadoop.util.Shell
+ introduced by HADOOP-2344. (acmurthy)
+
+ HADOOP-2442. Fix TestLocalFileSystemPermission.testLocalFSsetOwner
+ to work on more platforms. (Raghu Angadi via nigel)
+
+ HADOOP-2488. Fix a regression in random read performance.
+ (Michael Stack via rangadi)
+
+ HADOOP-2523. Fix TestDFSShell.testFilePermissions on Windows.
+ (Raghu Angadi via nigel)
+
+ HADOOP-2535. Removed support for deprecated mapred.child.heap.size and
+ fixed some indentation issues in TaskRunner. (acmurthy)
+ Configuration changes to hadoop-default.xml:
+ remove mapred.child.heap.size
+
+ HADOOP-2512. Fix error stream handling in Shell. Use exit code to
+ detect shell command errors in RawLocalFileSystem. (Raghu Angadi)
+
+ HADOOP-2446. Fixes TestHDFSServerPorts and TestMRServerPorts so they
+ do not rely on statically configured ports and cleanup better. (nigel)
+
+ HADOOP-2537. Make build process compatible with Ant 1.7.0.
+ (Hrishikesh via nigel)
+
+ HADOOP-1281. Ensure running tasks of completed map TIPs (e.g. speculative
+ tasks) are killed as soon as the TIP completes. (acmurthy)
+
+ HADOOP-2571. Suppress a spurious warning in test code. (cdouglas)
+
+ HADOOP-2481. NNBench reports its progress periodically.
+ (Hairong Kuang via dhruba)
+
+ HADOOP-2601. Start name-node on a free port for TestNNThroughputBenchmark.
+ (Konstantin Shvachko)
+
+ HADOOP-2494. Set +x on contrib/*/bin/* in packaged tar bundle.
+ (stack via tomwhite)
+
+ HADOOP-2605. Remove bogus leading slash in task-tracker report bindAddress.
+ (Konstantin Shvachko)
+
+ HADOOP-2620. Trivial. 'bin/hadoop fs -help' did not list chmod, chown, and
+ chgrp. (Raghu Angadi)
+
+ HADOOP-2614. The DFS WebUI accesses are configured to be from the user
+ specified by dfs.web.ugi. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2543. Implement a "no-permission-checking" mode for smooth
+ upgrade from a pre-0.16 install of HDFS.
+ (Hairong Kuang via dhruba)
+
+ HADOOP-290. A DataNode log message now prints the target of a replication
+ request correctly. (dhruba)
+
+ HADOOP-2538. Redirect to a warning, if plaintext parameter is true but
+ the filter parameter is not given in TaskLogServlet.
+ (Michael Bieniosek via enis)
+
+ HADOOP-2582. Prevent 'bin/hadoop fs -copyToLocal' from creating
+ zero-length files when the src does not exist.
+ (Lohit Vijayarenu via cdouglas)
+
+ HADOOP-2189. Incrementing user counters should count as progress. (ddas)
+
+ HADOOP-2649. The NameNode periodically computes replication work for
+ the datanodes. The periodicity of this computation is now configurable.
+ (dhruba)
+
+ HADOOP-2549. Correct disk size computation so that data-nodes could switch
+ to other local drives if the current one is full. (Hairong Kuang via shv)
+
+ HADOOP-2633. Fsck should call name-node methods directly rather than
+ through rpc. (Tsz Wo (Nicholas), SZE via shv)
+
+ HADOOP-2687. Modify a few log messages generated by the dfs client to be
+ logged only at INFO level. (stack via dhruba)
+
+ HADOOP-2402. Fix BlockCompressorStream to ensure it buffers data before
+ sending it down to the compressor so that each write call doesn't
+ compress. (Chris Douglas via acmurthy)
+
+ HADOOP-2645. The Metrics initialization code does not throw
+ exceptions when servers are restarted by MiniDFSCluster.
+ (Sanjay Radia via dhruba)
+
+ HADOOP-2691. Fix a race condition that was causing the DFSClient
+ to erroneously remove a good datanode from a pipeline that actually
+ had another datanode that was bad. (dhruba)
+
+ HADOOP-1195. All code in FSNamesystem checks the return value
+ of getDataNode for null before using it. (dhruba)
+
+ HADOOP-2640. Fix a bug in MultiFileSplitInputFormat that was always
+ returning 1 split in some circumstances. (Enis Soztutar via nigel)
+
+ HADOOP-2626. Fix paths with special characters to work correctly
+ with the local filesystem. (Thomas Friol via cutting)
+
+ HADOOP-2646. Fix SortValidator to work with fully-qualified
+ working directories. (Arun C Murthy via nigel)
+
+ HADOOP-2092. Added a ping mechanism to the pipes' task to periodically
+ check if the parent Java task is running, and exit if the parent isn't
+ alive and responding. (Amareshwari Sri Ramadasu via acmurthy)
+
+ HADOOP-2714. TestDecommission failed on Windows because the replication
+ request was timing out. (dhruba)
+
+ HADOOP-2576. Namenode performance degradation over time triggered by
+ large heartbeat interval. (Raghu Angadi)
+
+ HADOOP-2713. TestDatanodeDeath failed on Windows because the replication
+ request was timing out. (dhruba)
+
+ HADOOP-2639. Fixes a problem to do with incorrect maintenance of values
+ for runningMapTasks/runningReduceTasks. (Amar Kamat and Arun Murthy
+ via ddas)
+
+ HADOOP-2723. Fixed the check for checking whether to do user task
+ profiling. (Amareshwari Sri Ramadasu via omalley)
+
+ HADOOP-2734. Link forrest docs to new http://hadoop.apache.org
+ (Doug Cutting via nigel)
+
+ HADOOP-2641. Added Apache license headers to 95 files. (nigel)
+
+ HADOOP-2732. Fix bug in path globbing. (Hairong Kuang via nigel)
+
+ HADOOP-2404. Fix backwards compatibility with hadoop-0.15 configuration
+ files that was broken by HADOOP-2185. (omalley)
+
+ HADOOP-2755. Fix fsck performance degradation because of permissions
+ issue. (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-2768. Fix performance regression caused by HADOOP-1707.
+ (Dhruba Borthakur via nigel)
+
+ HADOOP-3108. Fix NPE in setPermission and setOwner. (shv)
+
+Release 0.15.3 - 2008-01-18
+
+ BUG FIXES
+
+ HADOOP-2562. globPaths supports {ab,cd}. (Hairong Kuang via dhruba)
+
+ HADOOP-2540. fsck reports missing blocks incorrectly. (dhruba)
+
+ HADOOP-2570. "work" directory created unconditionally, and symlinks
+ created from the task cwds.
+
+ HADOOP-2574. Fixed mapred_tutorial.xml to correct minor errors with the
+ WordCount examples. (acmurthy)
+
+Release 0.15.2 - 2008-01-02
+
+ BUG FIXES
+
+ HADOOP-2246. Moved the changelog for HADOOP-1851 from the NEW FEATURES
+ section to the INCOMPATIBLE CHANGES section. (acmurthy)
+
+ HADOOP-2238. Fix TaskGraphServlet so that it sets the content type of
+ the response appropriately. (Paul Saab via enis)
+
+ HADOOP-2129. Fix so that distcp works correctly when source is
+ HDFS but not the default filesystem. HDFS paths returned by the
+ listStatus() method are now fully-qualified. (cutting)
+
+ HADOOP-2378. Fixes a problem where the last task completion event would
+ get created after the job completes. (Alejandro Abdelnur via ddas)
+
+ HADOOP-2228. Checks whether a job with a certain jobId is already running
+ and then tries to create the JobInProgress object.
+ (Johan Oskarsson via ddas)
+
+ HADOOP-2422. dfs -cat multiple files fail with 'Unable to write to
+ output stream'. (Raghu Angadi via dhruba)
+
+ HADOOP-2460. When the namenode encounters I/O errors while writing a
+ transaction log, it stops writing new transactions to that one.
+ (Raghu Angadi via dhruba)
+
+ HADOOP-2227. Use the LocalDirAllocator uniformly for handling all of the
+ temporary storage required for a given task. It also implies that
+ mapred.local.dir.minspacestart is handled by checking if there is enough
+ free-space on any one of the available disks. (Amareshwari Sri Ramadasu
+ via acmurthy)
+
+ HADOOP-2437. Fix the LocalDirAllocator to choose the seed for the
+ round-robin disk selections randomly. This helps in spreading data across
+ multiple partitions much better. (acmurthy)
+
+ HADOOP-2486. When the list of files from the InMemoryFileSystem is obtained
+ for merging, this patch will ensure that only those files whose checksums
+ have also got created (renamed) are returned. (ddas)
+
+ HADOOP-2456. Hardcode English locale to prevent NumberFormatException
+ from occurring when starting the NameNode with certain locales.
+ (Matthias Friedrich via nigel)
+
+ IMPROVEMENTS
+
+ HADOOP-2160. Remove project-level, non-user documentation from
+ releases, since it's now maintained in a separate tree. (cutting)
+
+ HADOOP-1327. Add user documentation for streaming. (cutting)
+
+ HADOOP-2382. Add hadoop-default.html to subversion. (cutting)
+
+ HADOOP-2158. hdfsListDirectory calls FileSystem.listStatus instead
+ of FileSystem.listPaths. This reduces the number of RPC calls on the
+ namenode, thereby improving scalability. (Christian Kunz via dhruba)
+
+Release 0.15.1 - 2007-11-27
+
+ INCOMPATIBLE CHANGES
+
+ HADOOP-713. Reduce CPU usage on namenode while listing directories.
+ FileSystem.listPaths does not return the size of the entire subtree.
+ Introduced a new API ClientProtocol.getContentLength that returns the
+ size of the subtree. (Dhruba Borthakur via dhruba)
+
+ IMPROVEMENTS
+
+ HADOOP-1917. Addition of guides/tutorial for better overall
+ documentation for Hadoop. Specifically:
+ * quickstart.html is targeted towards first-time users and helps them
+ setup a single-node cluster and play with Hadoop.
+ * cluster_setup.html helps admins configure and set up non-trivial
+ Hadoop clusters.
+ * mapred_tutorial.html is a comprehensive Map-Reduce tutorial.
+ (acmurthy)
+
+ BUG FIXES
+
+ HADOOP-2174. Removed the unnecessary Reporter.setStatus call from
+ FSCopyFilesMapper.close which led to an NPE since the reporter isn't valid
+ in the close method. (Chris Douglas via acmurthy)
+
+ HADOOP-2172. Restore performance of random access to local files
+ by caching positions of local input streams, avoiding a system
+ call. (cutting)
+
+ HADOOP-2205. Regenerate the Hadoop website since some of the changes made
+ by HADOOP-1917 weren't correctly copied over to the trunk/docs directory.
+ Also fixed a couple of minor typos and broken links. (acmurthy)
+
+Release 0.15.0 - 2007-11-02
+
+ INCOMPATIBLE CHANGES
+
+ HADOOP-1708. Make files appear in namespace as soon as they are
+ created. (Dhruba Borthakur via dhruba)
+
+ HADOOP-999. A HDFS Client immediately informs the NameNode of a new
+ file creation. ClientProtocol version changed from 14 to 15.
+ (Tsz Wo (Nicholas), SZE via dhruba)
+
+ HADOOP-932. File locking interfaces and implementations (that were
+ earlier deprecated) are removed. Client Protocol version changed
+ from 15 to 16. (Raghu Angadi via dhruba)
+
+ HADOOP-1621. FileStatus is now a concrete class and FileSystem.listPaths
+ is deprecated and replaced with listStatus. (Chris Douglas via omalley)
+
+ HADOOP-1656. The blockSize of a file is stored persistently in the file
+ inode. (Dhruba Borthakur via dhruba)
+
+ HADOOP-1838. The blocksize of files created with an earlier release is
+ set to the default block size. (Dhruba Borthakur via dhruba)
+
+ HADOOP-785. Add support for 'final' Configuration parameters,
+ removing support for 'mapred-default.xml', and changing
+ 'hadoop-site.xml' to not override other files. Now folks should
+ generally use 'hadoop-site.xml' for all configurations. Values
+ with a 'final' tag may not be overridden by subsequently loaded
+ configuration files, e.g., by jobs. (Arun C. Murthy via cutting)
+
+ HADOOP-1846. DatanodeReport in ClientProtocol can report live
+ datanodes, dead datanodes or all datanodes. Client Protocol version
+ changed from 17 to 18. (Hairong Kuang via dhruba)
+
+ HADOOP-1851. Permit specification of map output compression type
+ and codec, independent of the final output's compression
+ parameters. (Arun C Murthy via cutting)
+
+ HADOOP-1819. Jobtracker cleanups, including binding ports before
+ clearing state directories, so that inadvertently starting a
+ second jobtracker doesn't trash one that's already running. Removed
+ method JobTracker.getTracker() because the static variable, which
+ stored the value caused initialization problems.
+ (omalley via cutting)
+
+ NEW FEATURES
+
+ HADOOP-89. A client can access file data even before the creator
+ has closed the file. Introduce a new command "tail" from dfs shell.
+ (Dhruba Borthakur via dhruba)
+
+ HADOOP-1636. Allow configuration of the number of jobs kept in
+ memory by the JobTracker. (Michael Bieniosek via omalley)
+
+ HADOOP-1667. Reorganize CHANGES.txt into sections to make it
+ easier to read. Also remove numbering, to make merging easier.
+ (cutting)
+
+ HADOOP-1610. Add metrics for failed tasks.
+ (Devaraj Das via tomwhite)
+
+ HADOOP-1767. Add "bin/hadoop job -list" sub-command. (taton via cutting)
+
+ HADOOP-1351. Add "bin/hadoop job [-fail-task|-kill-task]" sub-commands
+ to terminate a particular task-attempt. (Enis Soztutar via acmurthy)
+
+ HADOOP-1880. SleepJob : An example job that sleeps at each map and
+ reduce task. (enis)
+
+ HADOOP-1809. Add a link in web site to #hadoop IRC channel. (enis)
+
+ HADOOP-1894. Add percentage graphs and mapred task completion graphs
+ to the Web User Interface. Users not using Firefox may install a plugin
+ in their browsers to see SVG graphics. (enis)
+
+ HADOOP-1914. Introduce a new NamenodeProtocol to allow secondary
+ namenodes and rebalancing processes to communicate with a primary
+ namenode. (Hairong Kuang via dhruba)
+
+ HADOOP-1963. Add a FileSystem implementation for the Kosmos
+ Filesystem (KFS). (Sriram Rao via cutting)
+
+ HADOOP-1822. Allow the specialization and configuration of socket
+ factories. Provide a StandardSocketFactory, and a SocksSocketFactory to
+ allow the use of SOCKS proxies. (taton).
+
+ HADOOP-1968. FileSystem supports wildcard input syntax "{ }".
+ (Hairong Kuang via dhruba)
+
+ HADOOP-2566. Add globStatus method to the FileSystem interface
+ and deprecate globPath and listPath. (Hairong Kuang via hairong)
+
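+ A sketch combining the "{ }" syntax from HADOOP-1968 with the new
+ globStatus call (the pattern is illustrative):
+
+   import org.apache.hadoop.conf.Configuration;
+   import org.apache.hadoop.fs.FileStatus;
+   import org.apache.hadoop.fs.FileSystem;
+   import org.apache.hadoop.fs.Path;
+
+   public class GlobExample {
+     public static void main(String[] args) throws Exception {
+       FileSystem fs = FileSystem.get(new Configuration());
+       FileStatus[] matches = fs.globStatus(new Path("/logs/2007-{10,11}/*"));
+       if (matches != null) {                  // null when nothing matches
+         for (FileStatus s : matches) {
+           System.out.println(s.getPath());
+         }
+       }
+     }
+   }
+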
+ OPTIMIZATIONS
+
+ HADOOP-1910. Reduce the number of RPCs that DistributedFileSystem.create()
+ makes to the namenode. (Raghu Angadi via dhruba)
+
+ HADOOP-1565. Reduce memory usage of NameNode by replacing
+ TreeMap in HDFS Namespace with ArrayList.
+ (Dhruba Borthakur via dhruba)
+
+ HADOOP-1743. Change DFS INode from a nested class to standalone
+ class, with specialized subclasses for directories and files, to
+ save memory on the namenode. (Konstantin Shvachko via cutting)
+
+ HADOOP-1759. Change file name in INode from String to byte[],
+ saving memory on the namenode. (Konstantin Shvachko via cutting)
+
+ HADOOP-1766. Save memory in namenode by having BlockInfo extend
+ Block, and replace many uses of Block with BlockInfo.
+ (Konstantin Shvachko via cutting)
+
+ HADOOP-1687. Save memory in namenode by optimizing BlockMap
+ representation. (Konstantin Shvachko via cutting)
+
+ HADOOP-1774. Remove use of INode.parent in Block CRC upgrade.
+ (Raghu Angadi via dhruba)
+
+ HADOOP-1788. Increase the buffer size on the Pipes command socket.
+ (Amareshwari Sri Ramadasu and Christian Kunz via omalley)
+
+ BUG FIXES
+
+ HADOOP-1946. The Datanode code does not need to invoke du on
+ every heartbeat. (Hairong Kuang via dhruba)
+
+ HADOOP-1935. Fix a NullPointerException in internalReleaseCreate.
+ (Dhruba Borthakur)
+
+ HADOOP-1933. The nodes listed in include and exclude files
+ are always listed in the datanode report.
+ (Raghu Angadi via dhruba)
+
+ HADOOP-1953. The job tracker should wait between calls to try and delete
+ the system directory. (Owen O'Malley via devaraj)
+
+ HADOOP-1932. TestFileCreation fails with message saying filestatus.dat
+ is of incorrect size. (Dhruba Borthakur via dhruba)
+
+ HADOOP-1573. Support for 0 reducers in PIPES.
+ (Owen O'Malley via devaraj)
+
+ HADOOP-1500. Fix typographical errors in the DFS WebUI.
+ (Nigel Daley via dhruba)
+
+ HADOOP-1076. Periodic checkpoint can continue even if an earlier
+ checkpoint encountered an error. (Dhruba Borthakur via dhruba)
+
+ HADOOP-1887. The Namenode encounters an ArrayIndexOutOfBoundsException
+ while listing a directory that had a file that was
+ being actively written to. (Dhruba Borthakur via dhruba)
+
+ HADOOP-1904. The Namenode encounters an exception because the
+ list of blocks per datanode-descriptor was corrupted.
+ (Konstantin Shvachko via dhruba)
+
+ HADOOP-1762. The Namenode fsimage does not contain a list of
+ Datanodes. (Raghu Angadi via dhruba)
+
+ HADOOP-1890. Removed debugging prints introduced by HADOOP-1774.
+ (Raghu Angadi via dhruba)
+
+ HADOOP-1763. Too many lost task trackers on large clusters due to
+ insufficient number of RPC handler threads on the JobTracker.
+ (Devaraj Das)
+
+ HADOOP-1463. HDFS reports correct usage statistics for disk space
+ used by HDFS. (Hairong Kuang via dhruba)
+
+ HADOOP-1692. In DFS ant task, don't cache the Configuration.
+ (Chris Douglas via cutting)
+
+ HADOOP-1726. Remove lib/jetty-ext/ant.jar. (omalley)
+
+ HADOOP-1772. Fix hadoop-daemon.sh script to get correct hostname
+ under Cygwin. (Tsz Wo (Nicholas), SZE via cutting)
+
+ HADOOP-1749. Change TestDFSUpgrade to sort files, fixing sporadic
+ test failures. (Enis Soztutar via cutting)
+
+ HADOOP-1748. Fix tasktracker to be able to launch tasks when log
+ directory is relative. (omalley via cutting)
+
+ HADOOP-1775. Fix a NullPointerException and an
+ IllegalArgumentException in MapWritable.
+ (Jim Kellerman via cutting)
+
+ HADOOP-1795. Fix so that jobs can generate output file names with
+ special characters. (Frédéric Bertin via cutting)
+
+ HADOOP-1810. Fix incorrect value type in MRBench (SmallJobs)
+ (Devaraj Das via tomwhite)
+
+ HADOOP-1806. Fix ant task to compile again, also fix default
+ builds to compile ant tasks. (Chris Douglas via cutting)
+
+ HADOOP-1758. Fix escape processing in librecordio to not be
+ quadratic. (Vivek Ratan via cutting)
+
+ HADOOP-1817. Fix MultiFileSplit to read and write the split
+ length, so that it is not always zero in map tasks.
+ (Thomas Friol via cutting)
+
+ HADOOP-1853. Fix contrib/streaming to accept multiple -cacheFile
+ options. (Prachi Gupta via cutting)
+
+ HADOOP-1818. Fix MultiFileInputFormat so that it does not return
+ empty splits when numPaths < numSplits. (Thomas Friol via enis)
+
+ HADOOP-1840. Fix race condition which leads to task's diagnostic
+ messages getting lost. (acmurthy)
+
+ HADOOP-1885. Fix race condition in MiniDFSCluster shutdown.
+ (Chris Douglas via nigel)
+
+ HADOOP-1889. Fix path in EC2 scripts for building your own AMI.
+ (tomwhite)
+
+ HADOOP-1892. Fix a NullPointerException in the JobTracker when
+ trying to fetch a task's diagnostic messages from the JobClient.
+ (Amar Kamat via acmurthy)
+
+ HADOOP-1897. Completely remove about.html page from the web site.
+ (enis)
+
+ HADOOP-1907. Fix null pointer exception when getting task diagnostics
+ in JobClient. (Christian Kunz via omalley)
+
+ HADOOP-1882. Remove spurious asterisks from decimal number displays.
+ (Raghu Angadi via cutting)
+
+ HADOOP-1783. Make S3 FileSystem return Paths fully-qualified with
+ scheme and host. (tomwhite)
+
+ HADOOP-1925. Make pipes' autoconf script look for libsocket and libnsl, so
+ that it can compile under Solaris. (omalley)
+
+ HADOOP-1940. TestDFSUpgradeFromImage must shut down its MiniDFSCluster.
+ (Chris Douglas via nigel)
+
+    HADOOP-1930. Fix the blame for failed fetches on the right host. (Arun C.
+ Murthy via omalley)
+
+ HADOOP-1934. Fix the platform name on Mac to use underscores rather than
+ spaces. (omalley)
+
+ HADOOP-1959. Use "/" instead of File.separator in the StatusHttpServer.
+ (jimk via omalley)
+
+ HADOOP-1626. Improve dfsadmin help messages.
+ (Lohit Vijayarenu via dhruba)
+
+ HADOOP-1695. The SecondaryNamenode waits for the Primary NameNode to
+ start up. (Dhruba Borthakur)
+
+ HADOOP-1983. Have Pipes flush the command socket when progress is sent
+ to prevent timeouts during long computations. (omalley)
+
+    HADOOP-1875. Non-existent directories or read-only directories are
+ filtered from dfs.client.buffer.dir. (Hairong Kuang via dhruba)
+
+ HADOOP-1992. Fix the performance degradation in the sort validator.
+ (acmurthy via omalley)
+
+ HADOOP-1874. Move task-outputs' promotion/discard to a separate thread
+    distinct from the main heartbeat-processing thread. The main upside is
+    that we do not lock up the JobTracker during HDFS operations, which
+ otherwise may lead to lost tasktrackers if the NameNode is unresponsive.
+ (Devaraj Das via acmurthy)
+
+ HADOOP-2026. Namenode prints out one log line for "Number of transactions"
+ at most once every minute. (Dhruba Borthakur)
+
+ HADOOP-2022. Ensure that status information for successful tasks is correctly
+ recorded at the JobTracker, so that, for example, one may view correct
+ information via taskdetails.jsp. This bug was introduced by HADOOP-1874.
+ (Amar Kamat via acmurthy)
+
+    HADOOP-2031. Correctly maintain the taskid which takes the TIP to
+    completion; without this, the case of lost tasktrackers isn't handled
+    properly, i.e. the map TIP is incorrectly left marked as 'complete' and
+    is never rescheduled elsewhere, leading to hung reduces.
+    (Devaraj Das via acmurthy)
+
+ HADOOP-2018. The source datanode of a data transfer waits for
+ a response from the target datanode before closing the data stream.
+ (Hairong Kuang via dhruba)
+
+ HADOOP-2023. Disable TestLocalDirAllocator on Windows.
+ (Hairong Kuang via nigel)
+
+ HADOOP-2016. Ignore status-updates from FAILED/KILLED tasks at the
+ TaskTracker. This fixes a race-condition which caused the tasks to wrongly
+ remain in the RUNNING state even after being killed by the JobTracker and
+ thus handicap the cleanup of the task's output sub-directory. (acmurthy)
+
+ HADOOP-1771. Fix a NullPointerException in streaming caused by an
+ IOException in MROutputThread. (lohit vijayarenu via nigel)
+
+ HADOOP-2028. Fix distcp so that the log dir does not need to be
+ specified and the destination does not need to exist.
+ (Chris Douglas via nigel)
+
+ HADOOP-2044. The namenode protects all lease manipulations using a
+ sortedLease lock. (Dhruba Borthakur)
+
+ HADOOP-2051. The TaskCommit thread should not die for exceptions other
+    than the InterruptedException, matching the behavior of the other long
+    running threads in the JobTracker. (Arun C Murthy via ddas)
+
+    HADOOP-1973. The FileSystem object would be accessed on the JobTracker
+    through an RPC in the InterTrackerProtocol. The check for the object
+    being null was missing and hence an NPE would sometimes be thrown.
+    This fixes that problem. (Amareshwari Sri Ramadasu via ddas)
+
+ HADOOP-2033. The SequenceFile.Writer.sync method was a no-op, which caused
+ very uneven splits for applications like distcp that count on them.
+ (omalley)
+
+ HADOOP-2070. Added a flush method to pipes' DownwardProtocol and call
+ that before waiting for the application to finish to ensure all buffered
+ data is flushed. (Owen O'Malley via acmurthy)
+
+ HADOOP-2080. Fixed calculation of the checksum file size when the values
+ are large. (omalley)
+
+ HADOOP-2048. Change error handling in distcp so that each map copies
+ as much as possible before reporting the error. Also report progress on
+ every copy. (Chris Douglas via omalley)
+
+ HADOOP-2073. Change size of VERSION file after writing contents to it.
+ (Konstantin Shvachko via dhruba)
+
+ HADOOP-2102. Fix the deprecated ToolBase to pass its Configuration object
+    to the superseding ToolRunner to ensure it picks up the appropriate
+ configuration resources. (Dennis Kubes and Enis Soztutar via acmurthy)
+
+    HADOOP-2103. Fix minor javadoc bugs introduced by HADOOP-2046. (Nigel
+ Daley via acmurthy)
+
+ IMPROVEMENTS
+
+ HADOOP-1908. Restructure data node code so that block sending and
+    receiving are separated from data transfer header handling.
+ (Hairong Kuang via dhruba)
+
+ HADOOP-1921. Save the configuration of completed/failed jobs and make them
+ available via the web-ui. (Amar Kamat via devaraj)
+
+ HADOOP-1266. Remove dependency of package org.apache.hadoop.net on
+ org.apache.hadoop.dfs. (Hairong Kuang via dhruba)
+
+ HADOOP-1779. Replace INodeDirectory.getINode() by a getExistingPathINodes()
+ to allow the retrieval of all existing INodes along a given path in a
+ single lookup. This facilitates removal of the 'parent' field in the
+ inode. (Christophe Taton via dhruba)
+
+ HADOOP-1756. Add toString() to some Writable-s. (ab)
+
+ HADOOP-1727. New classes: MapWritable and SortedMapWritable.
+ (Jim Kellerman via ab)
+
+ HADOOP-1651. Improve progress reporting.
+ (Devaraj Das via tomwhite)
+
+ HADOOP-1595. dfsshell can wait for a file to achieve its intended
+ replication target. (Tsz Wo (Nicholas), SZE via dhruba)
+
+    HADOOP-1693. Remove unneeded log fields in DFS replication classes,
+ since the log may be accessed statically. (Konstantin Shvachko via cutting)
+
+ HADOOP-1231. Add generics to Mapper and Reducer interfaces.
+ (tomwhite via cutting)
+
+ HADOOP-1436. Improved command-line APIs, so that all tools need
+ not subclass ToolBase, and generic parameter parser is public.
+ (Enis Soztutar via cutting)
+
+ HADOOP-1703. DFS-internal code cleanups, removing several uses of
+ the obsolete UTF8. (Christophe Taton via cutting)
+
+ HADOOP-1731. Add Hadoop's version to contrib jar file names.
+ (cutting)
+
+ HADOOP-1689. Make shell scripts more portable. All shell scripts
+ now explicitly depend on bash, but do not require that bash be
+ installed in a particular location, as long as it is on $PATH.
+ (cutting)
+
+ HADOOP-1744. Remove many uses of the deprecated UTF8 class from
+ the HDFS namenode. (Christophe Taton via cutting)
+
+ HADOOP-1654. Add IOUtils class, containing generic io-related
+ utility methods. (Enis Soztutar via cutting)
+
+ HADOOP-1158. Change JobTracker to record map-output transmission
+ errors and use them to trigger speculative re-execution of tasks.
+ (Arun C Murthy via cutting)
+
+ HADOOP-1601. Change GenericWritable to use ReflectionUtils for
+ instance creation, avoiding classloader issues, and to implement
+ Configurable. (Enis Soztutar via cutting)
+
+ HADOOP-1750. Log standard output and standard error when forking
+ task processes. (omalley via cutting)
+
+ HADOOP-1803. Generalize build.xml to make files in all
+ src/contrib/*/bin directories executable. (stack via cutting)
+
+ HADOOP-1739. Let OS always choose the tasktracker's umbilical
+ port. Also switch default address for umbilical connections to
+ loopback. (cutting)
+
+ HADOOP-1812. Let OS choose ports for IPC and RPC unit tests. (cutting)
+
+ HADOOP-1825. Create $HADOOP_PID_DIR when it does not exist.
+ (Michael Bieniosek via cutting)
+
+ HADOOP-1425. Replace uses of ToolBase with the Tool interface.
+ (Enis Soztutar via cutting)
+
+ HADOOP-1569. Reimplement DistCP to use the standard FileSystem/URI
+ code in Hadoop so that you can copy from and to all of the supported file
+    systems. (Chris Douglas via omalley)
+
+    HADOOP-1018. Improve documentation w.r.t. handling of lost heartbeats between
+ TaskTrackers and JobTracker. (acmurthy)
+
+ HADOOP-1718. Add ant targets for measuring code coverage with clover.
+ (simonwillnauer via nigel)
+
+ HADOOP-1592. Log error messages to the client console when tasks
+ fail. (Amar Kamat via cutting)
+
+ HADOOP-1879. Remove some unneeded casts. (Nilay Vaish via cutting)
+
+ HADOOP-1878. Add space between priority links on job details
+ page. (Thomas Friol via cutting)
+
+ HADOOP-120. In ArrayWritable, prevent creation with null value
+ class, and improve documentation. (Cameron Pope via cutting)
+
+ HADOOP-1926. Add a random text writer example/benchmark so that we can
+ benchmark compression codecs on random data. (acmurthy via omalley)
+
+    HADOOP-1906. Warn the user if they have an obsolete mapred-default.xml
+ file in their configuration directory. (acmurthy via omalley)
+
+ HADOOP-1971. Warn when job does not specify a jar. (enis via cutting)
+
+ HADOOP-1942. Increase the concurrency of transaction logging to
+ edits log. Reduce the number of syncs by double-buffering the changes
+ to the transaction log. (Dhruba Borthakur)
+
+ HADOOP-2046. Improve mapred javadoc. (Arun C. Murthy via cutting)
+
+    HADOOP-2105. Improve overview.html to clarify supported platforms and
+    software prerequisites for hadoop, how to install them on various
+    platforms, and to give a better general description of hadoop and its
+    utility. (Jim Kellerman via acmurthy)
+
+
+Release 0.14.4 - 2007-11-26
+
+ BUG FIXES
+
+ HADOOP-2140. Add missing Apache Licensing text at the front of several
+ C and C++ files.
+
+ HADOOP-2169. Fix the DT_SONAME field of libhdfs.so to set it to the
+    correct value of 'libhdfs.so'; previously it was set to the absolute
+    path of libhdfs.so. (acmurthy)
+
+ HADOOP-2001. Make the job priority updates and job kills synchronized on
+ the JobTracker. Deadlock was seen in the JobTracker because of the lack of
+ this synchronization. (Arun C Murthy via ddas)
+
+
+Release 0.14.3 - 2007-10-19
+
+ BUG FIXES
+
+ HADOOP-2053. Fixed a dangling reference to a memory buffer in the map
+ output sorter. (acmurthy via omalley)
+
+ HADOOP-2036. Fix a NullPointerException in JvmMetrics class. (nigel)
+
+ HADOOP-2043. Release 0.14.2 was compiled with Java 1.6 rather than
+ Java 1.5. (cutting)
+
+
+Release 0.14.2 - 2007-10-09
+
+ BUG FIXES
+
+ HADOOP-1948. Removed spurious error message during block crc upgrade.
+ (Raghu Angadi via dhruba)
+
+    HADOOP-1862. Reduces are getting stuck trying to find map outputs.
+ (Arun C. Murthy via ddas)
+
+ HADOOP-1977. Fixed handling of ToolBase cli options in JobClient.
+ (enis via omalley)
+
+ HADOOP-1972. Fix LzoCompressor to ensure the user has actually asked
+ to finish compression. (arun via omalley)
+
+ HADOOP-1970. Fix deadlock in progress reporting in the task. (Vivek
+ Ratan via omalley)
+
+ HADOOP-1978. Name-node removes edits.new after a successful startup.
+ (Konstantin Shvachko via dhruba)
+
+    HADOOP-1955. The Namenode tries not to pick the same source Datanode
+    for a replication request if an earlier replication request for the
+    same block from that source Datanode had failed.
+    (Raghu Angadi via dhruba)
+
+ HADOOP-1961. The -get option to dfs-shell works when a single filename
+ is specified. (Raghu Angadi via dhruba)
+
+ HADOOP-1997. TestCheckpoint closes the edits file after writing to it,
+ otherwise the rename of this file on Windows fails.
+ (Konstantin Shvachko via dhruba)
+
+Release 0.14.1 - 2007-09-04
+
+ BUG FIXES
+
+ HADOOP-1740. Fix null pointer exception in sorting map outputs. (Devaraj
+ Das via omalley)
+
+ HADOOP-1790. Fix tasktracker to work correctly on multi-homed
+ boxes. (Torsten Curdt via cutting)
+
+ HADOOP-1798. Fix jobtracker to correctly account for failed
+ tasks. (omalley via cutting)
+
+
+Release 0.14.0 - 2007-08-17
+
+ INCOMPATIBLE CHANGES
+
+ 1. HADOOP-1134.
+     CONFIG/API - dfs.block.size must now be a multiple of
+     io.bytes.per.checksum, otherwise new files cannot be written.
+ LAYOUT - DFS layout version changed from -6 to -7, which will require an
+ upgrade from previous versions.
+ PROTOCOL - Datanode RPC protocol version changed from 7 to 8.
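+
+     A minimal sketch of honoring this constraint from client code; the
+     512-byte chunk below is illustrative (it is the usual default):
+
+       import org.apache.hadoop.conf.Configuration;
+
+       public class BlockSizeConfig {
+         public static void main(String[] args) {
+           Configuration conf = new Configuration();
+           // 67108864 = 512 * 131072, so the new-file constraint holds.
+           conf.setInt("io.bytes.per.checksum", 512);
+           conf.set("dfs.block.size", Long.toString(64L * 1024 * 1024));
+         }
+       }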
+
+ 2. HADOOP-1283
+ API - deprecated file locking API.
+
+ 3. HADOOP-894
+ PROTOCOL - changed ClientProtocol to fetch parts of block locations.
+
+ 4. HADOOP-1336
+ CONFIG - Enable speculative execution by default.
+
+ 5. HADOOP-1197
+ API - deprecated method for Configuration.getObject, because
+ Configurations should only contain strings.
+
+ 6. HADOOP-1343
+ API - deprecate Configuration.set(String,Object) so that only strings are
+     put in Configurations.
+
+ 7. HADOOP-1207
+ CLI - Fix FsShell 'rm' command to continue when a non-existent file is
+ encountered.
+
+ 8. HADOOP-1473
+ CLI/API - Job, TIP, and Task id formats have changed and are now unique
+ across job tracker restarts.
+
+ 9. HADOOP-1400
+ API - JobClient constructor now takes a JobConf object instead of a
+ Configuration object.
+
+ NEW FEATURES and BUG FIXES
+
+ 1. HADOOP-1197. In Configuration, deprecate getObject() and add
+ getRaw(), which skips variable expansion. (omalley via cutting)
+
+ 2. HADOOP-1343. In Configuration, deprecate set(String,Object) and
+ implement Iterable. (omalley via cutting)
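+
+     A minimal sketch of both changes, since the entries above state that
+     getRaw() skips variable expansion and that Configuration now
+     implements Iterable:
+
+       import java.util.Map;
+       import org.apache.hadoop.conf.Configuration;
+
+       public class ConfDemo {
+         public static void main(String[] args) {
+           Configuration conf = new Configuration();
+           conf.set("data.dir", "${hadoop.tmp.dir}/data");
+           System.out.println(conf.get("data.dir"));    // expanded
+           System.out.println(conf.getRaw("data.dir")); // literal ${...}
+           for (Map.Entry<String, String> e : conf) {   // HADOOP-1343
+             System.out.println(e.getKey() + " = " + e.getValue());
+           }
+         }
+       }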
+
+ 3. HADOOP-1344. Add RunningJob#getJobName(). (Michael Bieniosek via cutting)
+
+ 4. HADOOP-1342. In aggregators, permit one to limit the number of
+ unique values per key. (Runping Qi via cutting)
+
+ 5. HADOOP-1340. Set the replication factor of the MD5 file in the filecache
+ to be the same as the replication factor of the original file.
+ (Dhruba Borthakur via tomwhite.)
+
+ 6. HADOOP-1355. Fix null pointer dereference in
+ TaskLogAppender.append(LoggingEvent). (Arun C Murthy via tomwhite.)
+
+ 7. HADOOP-1357. Fix CopyFiles to correctly avoid removing "/".
+ (Arun C Murthy via cutting)
+
+ 8. HADOOP-234. Add pipes facility, which permits writing MapReduce
+ programs in C++.
+
+ 9. HADOOP-1359. Fix a potential NullPointerException in HDFS.
+ (Hairong Kuang via cutting)
+
+ 10. HADOOP-1364. Fix inconsistent synchronization in SequenceFile.
+ (omalley via cutting)
+
+ 11. HADOOP-1379. Add findbugs target to build.xml.
+ (Nigel Daley via cutting)
+
+ 12. HADOOP-1364. Fix various inconsistent synchronization issues.
+ (Devaraj Das via cutting)
+
+ 13. HADOOP-1393. Remove a potential unexpected negative number from
+ uses of random number generator. (omalley via cutting)
+
+ 14. HADOOP-1387. A number of "performance" code-cleanups suggested
+ by findbugs. (Arun C Murthy via cutting)
+
+ 15. HADOOP-1401. Add contrib/hbase javadoc to tree. (stack via cutting)
+
+ 16. HADOOP-894. Change HDFS so that the client only retrieves a limited
+ number of block locations per request from the namenode.
+ (Konstantin Shvachko via cutting)
+
+ 17. HADOOP-1406. Plug a leak in MapReduce's use of metrics.
+ (David Bowen via cutting)
+
+ 18. HADOOP-1394. Implement "performance" code-cleanups in HDFS
+ suggested by findbugs. (Raghu Angadi via cutting)
+
+ 19. HADOOP-1413. Add example program that uses Knuth's dancing links
+ algorithm to solve pentomino problems. (omalley via cutting)
+
+ 20. HADOOP-1226. Change HDFS so that paths it returns are always
+ fully qualified. (Dhruba Borthakur via cutting)
+
+ 21. HADOOP-800. Improvements to HDFS web-based file browser.
+ (Enis Soztutar via cutting)
+
+ 22. HADOOP-1408. Fix a compiler warning by adding a class to replace
+ a generic. (omalley via cutting)
+
+ 23. HADOOP-1376. Modify RandomWriter example so that it can generate
+ data for the Terasort benchmark. (Devaraj Das via cutting)
+
+ 24. HADOOP-1429. Stop logging exceptions during normal IPC server
+ shutdown. (stack via cutting)
+
+ 25. HADOOP-1461. Fix the synchronization of the task tracker to
+ avoid lockups in job cleanup. (Arun C Murthy via omalley)
+
+ 26. HADOOP-1446. Update the TaskTracker metrics while the task is
+ running. (Devaraj via omalley)
+
+ 27. HADOOP-1414. Fix a number of issues identified by FindBugs as
+ "Bad Practice". (Dhruba Borthakur via cutting)
+
+ 28. HADOOP-1392. Fix "correctness" bugs identified by FindBugs in
+ fs and dfs packages. (Raghu Angadi via cutting)
+
+ 29. HADOOP-1412. Fix "dodgy" bugs identified by FindBugs in fs and
+ io packages. (Hairong Kuang via cutting)
+
+ 30. HADOOP-1261. Remove redundant events from HDFS namenode's edit
+ log when a datanode restarts. (Raghu Angadi via cutting)
+
+ 31. HADOOP-1336. Re-enable speculative execution by
+ default. (omalley via cutting)
+
+ 32. HADOOP-1311. Fix a bug in BytesWritable#set() where start offset
+ was ignored. (Dhruba Borthakur via cutting)
+
+ 33. HADOOP-1450. Move checksumming closer to user code, so that
+ checksums are created before data is stored in large buffers and
+ verified after data is read from large buffers, to better catch
+ memory errors. (cutting)
+
+ 34. HADOOP-1447. Add support in contrib/data_join for text inputs.
+ (Senthil Subramanian via cutting)
+
+ 35. HADOOP-1456. Fix TestDecommission assertion failure by setting
+ the namenode to ignore the load on datanodes while allocating
+ replicas. (Dhruba Borthakur via tomwhite)
+
+ 36. HADOOP-1396. Fix FileNotFoundException on DFS block.
+ (Dhruba Borthakur via tomwhite)
+
+ 37. HADOOP-1467. Remove redundant counters from WordCount example.
+ (Owen O'Malley via tomwhite)
+
+ 38. HADOOP-1139. Log HDFS block transitions at INFO level, to better
+ enable diagnosis of problems. (Dhruba Borthakur via cutting)
+
+ 39. HADOOP-1269. Finer grained locking in HDFS namenode.
+ (Dhruba Borthakur via cutting)
+
+ 40. HADOOP-1438. Improve HDFS documentation, correcting typos and
+ making images appear in PDF. Also update copyright date for all
+ docs. (Luke Nezda via cutting)
+
+ 41. HADOOP-1457. Add counters for monitoring task assignments.
+ (Arun C Murthy via tomwhite)
+
+ 42. HADOOP-1472. Fix so that timed-out tasks are counted as failures
+ rather than as killed. (Arun C Murthy via cutting)
+
+ 43. HADOOP-1234. Fix a race condition in file cache that caused
+ tasktracker to not be able to find cached files.
+ (Arun C Murthy via cutting)
+
+ 44. HADOOP-1482. Fix secondary namenode to roll info port.
+ (Dhruba Borthakur via cutting)
+
+ 45. HADOOP-1300. Improve removal of excess block replicas to be
+ rack-aware. Attempts are now made to keep replicas on more
+ racks. (Hairong Kuang via cutting)
+
+ 46. HADOOP-1417. Disable a few FindBugs checks that generate a lot
+ of spurious warnings. (Nigel Daley via cutting)
+
+ 47. HADOOP-1320. Rewrite RandomWriter example to bypass reduce.
+ (Arun C Murthy via cutting)
+
+ 48. HADOOP-1449. Add some examples to contrib/data_join.
+ (Senthil Subramanian via cutting)
+
+ 49. HADOOP-1459. Fix so that, in HDFS, getFileCacheHints() returns
+ hostnames instead of IP addresses. (Dhruba Borthakur via cutting)
+
+ 50. HADOOP-1493. Permit specification of "java.library.path" system
+ property in "mapred.child.java.opts" configuration property.
+ (Enis Soztutar via cutting)
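+
+     A minimal sketch; the heap size and native-library path below are
+     illustrative:
+
+       import org.apache.hadoop.mapred.JobConf;
+
+       public class ChildOptsDemo {
+         public static void main(String[] args) {
+           JobConf conf = new JobConf(ChildOptsDemo.class);
+           conf.set("mapred.child.java.opts",
+                    "-Xmx200m -Djava.library.path=/usr/local/lib/native");
+         }
+       }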
+
+ 51. HADOOP-1372. Use LocalDirAllocator for HDFS temporary block
+ files, so that disk space, writability, etc. is considered.
+ (Dhruba Borthakur via cutting)
+
+ 52. HADOOP-1193. Pool allocation of compression codecs. This
+ eliminates a memory leak that could cause OutOfMemoryException,
+ and also substantially improves performance.
+ (Arun C Murthy via cutting)
+
+ 53. HADOOP-1492. Fix a NullPointerException handling version
+ mismatch during datanode registration.
+ (Konstantin Shvachko via cutting)
+
+ 54. HADOOP-1442. Fix handling of zero-length input splits.
+ (Senthil Subramanian via cutting)
+
+ 55. HADOOP-1444. Fix HDFS block id generation to check pending
+ blocks for duplicates. (Dhruba Borthakur via cutting)
+
+ 56. HADOOP-1207. Fix FsShell's 'rm' command to not stop when one of
+ the named files does not exist. (Tsz Wo Sze via cutting)
+
+ 57. HADOOP-1475. Clear tasktracker's file cache before it
+ re-initializes, to avoid confusion. (omalley via cutting)
+
+ 58. HADOOP-1505. Remove spurious stacktrace in ZlibFactory
+ introduced in HADOOP-1093. (Michael Stack via tomwhite)
+
+ 59. HADOOP-1484. Permit one to kill jobs from the web ui. Note that
+ this is disabled by default. One must set
+ "webinterface.private.actions" to enable this.
+ (Enis Soztutar via cutting)
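+
+     This is normally set in a site configuration file; a programmatic
+     sketch of enabling it:
+
+       import org.apache.hadoop.conf.Configuration;
+
+       public class EnableWebActions {
+         public static void main(String[] args) {
+           Configuration conf = new Configuration();
+           // Permits job kills (and similar actions) from the web ui.
+           conf.set("webinterface.private.actions", "true");
+         }
+       }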
+
+ 60. HADOOP-1003. Remove flushing of namenode edit log from primary
+ namenode lock, increasing namenode throughput.
+ (Dhruba Borthakur via cutting)
+
+ 61. HADOOP-1023. Add links to searchable mail archives.
+ (tomwhite via cutting)
+
+ 62. HADOOP-1504. Fix terminate-hadoop-cluster script in contrib/ec2
+ to only terminate Hadoop instances, and not other instances
+ started by the same user. (tomwhite via cutting)
+
+ 63. HADOOP-1462. Improve task progress reporting. Progress reports
+ are no longer blocking since i/o is performed in a separate
+     thread. Reporting during sorting and elsewhere is also more
+     consistent. (Vivek Ratan via cutting)
+
+ 64. [ intentionally blank ]
+
+ 65. HADOOP-1453. Remove some unneeded calls to FileSystem#exists()
+ when opening files, reducing the namenode load somewhat.
+ (Raghu Angadi via cutting)
+
+ 66. HADOOP-1489. Fix text input truncation bug due to mark/reset.
+ Add a unittest. (Bwolen Yang via cutting)
+
+ 67. HADOOP-1455. Permit specification of arbitrary job options on
+ pipes command line. (Devaraj Das via cutting)
+
+ 68. HADOOP-1501. Better randomize sending of block reports to
+      namenode, to reduce load spikes. (Dhruba Borthakur via cutting)
+
+ 69. HADOOP-1147. Remove @author tags from Java source files.
+
+ 70. HADOOP-1283. Convert most uses of UTF8 in the namenode to be
+ String. (Konstantin Shvachko via cutting)
+
+ 71. HADOOP-1511. Speedup hbase unit tests. (stack via cutting)
+
+ 72. HADOOP-1517. Remove some synchronization in namenode to permit
+ finer grained locking previously added. (Konstantin Shvachko via cutting)
+
+ 73. HADOOP-1512. Fix failing TestTextInputFormat on Windows.
+ (Senthil Subramanian via nigel)
+
+ 74. HADOOP-1518. Add a session id to job metrics, for use by HOD.
+ (David Bowen via cutting)
+
+ 75. HADOOP-1292. Change 'bin/hadoop fs -get' to first copy files to
+ a temporary name, then rename them to their final name, so that
+ failures don't leave partial files. (Tsz Wo Sze via cutting)
+
+ 76. HADOOP-1377. Add support for modification time to FileSystem and
+ implement in HDFS and local implementations. Also, alter access
+ to file properties to be through a new FileStatus interface.
+ (Dhruba Borthakur via cutting)
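+
+     A minimal sketch of reading properties through the new interface; the
+     path is illustrative and the accessor names getLen() and
+     getModificationTime() are assumptions:
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileStatus;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class StatDemo {
+         public static void main(String[] args) throws Exception {
+           FileSystem fs = FileSystem.get(new Configuration());
+           FileStatus st = fs.getFileStatus(new Path("/user/demo/data"));
+           System.out.println("len=" + st.getLen()
+               + " mtime=" + st.getModificationTime());
+         }
+       }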
+
+ 77. HADOOP-1515. Add MultiFileInputFormat, which can pack multiple,
+ typically small, input files into each split. (Enis Soztutar via cutting)
+
+ 78. HADOOP-1514. Make reducers report progress while waiting for map
+ outputs, so they're not killed. (Vivek Ratan via cutting)
+
+ 79. HADOOP-1508. Add an Ant task for FsShell operations. Also add
+ new FsShell commands "touchz", "test" and "stat".
+ (Chris Douglas via cutting)
+
+ 80. HADOOP-1028. Add log messages for server startup and shutdown.
+ (Tsz Wo Sze via cutting)
+
+ 81. HADOOP-1485. Add metrics for monitoring shuffle.
+ (Devaraj Das via cutting)
+
+ 82. HADOOP-1536. Remove file locks from libhdfs tests.
+ (Dhruba Borthakur via nigel)
+
+ 83. HADOOP-1520. Add appropriate synchronization to FSEditsLog.
+ (Dhruba Borthakur via nigel)
+
+ 84. HADOOP-1513. Fix a race condition in directory creation.
+ (Devaraj via omalley)
+
+ 85. HADOOP-1546. Remove spurious column from HDFS web UI.
+ (Dhruba Borthakur via cutting)
+
+ 86. HADOOP-1556. Make LocalJobRunner delete working files at end of
+ job run. (Devaraj Das via tomwhite)
+
+ 87. HADOOP-1571. Add contrib lib directories to root build.xml
+ javadoc classpath. (Michael Stack via tomwhite)
+
+ 88. HADOOP-1554. Log killed tasks to the job history and display them on the
+ web/ui. (Devaraj Das via omalley)
+
+ 89. HADOOP-1533. Add persistent error logging for distcp. The logs are stored
+ into a specified hdfs directory. (Senthil Subramanian via omalley)
+
+ 90. HADOOP-1286. Add support to HDFS for distributed upgrades, which
+ permits coordinated upgrade of datanode data.
+ (Konstantin Shvachko via cutting)
+
+ 91. HADOOP-1580. Improve contrib/streaming so that subprocess exit
+ status is displayed for errors. (John Heidemann via cutting)
+
+ 92. HADOOP-1448. In HDFS, randomize lists of non-local block
+ locations returned to client, so that load is better balanced.
+ (Hairong Kuang via cutting)
+
+ 93. HADOOP-1578. Fix datanode to send its storage id to namenode
+ during registration. (Konstantin Shvachko via cutting)
+
+ 94. HADOOP-1584. Fix a bug in GenericWritable which limited it to
+ 128 types instead of 256. (Espen Amble Kolstad via cutting)
+
+ 95. HADOOP-1473. Make job ids unique across jobtracker restarts.
+ (omalley via cutting)
+
+ 96. HADOOP-1582. Fix hdfslib to return 0 instead of -1 at
+ end-of-file, per C conventions. (Christian Kunz via cutting)
+
+ 97. HADOOP-911. Fix a multithreading bug in libhdfs.
+ (Christian Kunz)
+
+ 98. HADOOP-1486. Fix so that fatal exceptions in namenode cause it
+ to exit. (Dhruba Borthakur via cutting)
+
+ 99. HADOOP-1470. Factor checksum generation and validation out of
+     ChecksumFileSystem so that it can be reused by FileSystems with
+ built-in checksumming. (Hairong Kuang via cutting)
+
+100. HADOOP-1590. Use relative urls in jobtracker jsp pages, so that
+ webapp can be used in non-root contexts. (Thomas Friol via cutting)
+
+101. HADOOP-1596. Fix the parsing of taskids by streaming and improve the
+ error reporting. (omalley)
+
+102. HADOOP-1535. Fix the user-controlled grouping to the reduce function.
+ (Vivek Ratan via omalley)
+
+103. HADOOP-1585. Modify GenericWritable to declare the classes as subtypes
+     of Writable. (Espen Amble Kolstad via omalley)
+
+104. HADOOP-1576. Fix errors in count of completed tasks when
+ speculative execution is enabled. (Arun C Murthy via cutting)
+
+105. HADOOP-1598. Fix license headers: adding missing; updating old.
+ (Enis Soztutar via cutting)
+
+106. HADOOP-1547. Provide examples for aggregate library.
+ (Runping Qi via tomwhite)
+
+107. HADOOP-1570. Permit jobs to enable and disable the use of
+ hadoop's native library. (Arun C Murthy via cutting)
+
+108. HADOOP-1433. Add job priority. (Johan Oskarsson via tomwhite)
+
+109. HADOOP-1597. Add status reports and post-upgrade options to HDFS
+ distributed upgrade. (Konstantin Shvachko via cutting)
+
+110. HADOOP-1524. Permit user task logs to appear as they're
+ created. (Michael Bieniosek via cutting)
+
+111. HADOOP-1599. Fix distcp bug on Windows. (Senthil Subramanian via cutting)
+
+112. HADOOP-1562. Add JVM metrics, including GC and logging stats.
+ (David Bowen via cutting)
+
+113. HADOOP-1613. Fix "DFS Health" page to display correct time of
+ last contact. (Dhruba Borthakur via cutting)
+
+114. HADOOP-1134. Add optimized checksum support to HDFS. Checksums
+ are now stored with each block, rather than as parallel files.
+ This reduces the namenode's memory requirements and increases
+ data integrity. (Raghu Angadi via cutting)
+
+115. HADOOP-1400. Make JobClient retry requests, so that clients can
+ survive jobtracker problems. (omalley via cutting)
+
+116. HADOOP-1564. Add unit tests for HDFS block-level checksums.
+ (Dhruba Borthakur via cutting)
+
+117. HADOOP-1620. Reduce the number of abstract FileSystem methods,
+ simplifying implementations. (cutting)
+
+118. HADOOP-1625. Fix a "could not move files" exception in datanode.
+ (Raghu Angadi via cutting)
+
+119. HADOOP-1624. Fix an infinite loop in datanode. (Raghu Angadi via cutting)
+
+120. HADOOP-1084. Switch mapred file cache to use file modification
+ time instead of checksum to detect file changes, as checksums are
+ no longer easily accessed. (Arun C Murthy via cutting)
+
+130. HADOOP-1623. Fix an infinite loop when copying directories.
+ (Dhruba Borthakur via cutting)
+
+131. HADOOP-1603. Fix a bug in namenode initialization where
+ default replication is sometimes reset to one on restart.
+ (Raghu Angadi via cutting)
+
+132. HADOOP-1635. Remove hardcoded keypair name and fix launch-hadoop-cluster
+ to support later versions of ec2-api-tools. (Stu Hood via tomwhite)
+
+133. HADOOP-1638. Fix contrib EC2 scripts to support NAT addressing.
+ (Stu Hood via tomwhite)
+
+134. HADOOP-1632. Fix an IllegalArgumentException in fsck.
+ (Hairong Kuang via cutting)
+
+135. HADOOP-1619. Fix FSInputChecker to not attempt to read past EOF.
+ (Hairong Kuang via cutting)
+
+136. HADOOP-1640. Fix TestDecommission on Windows.
+ (Dhruba Borthakur via cutting)
+
+137. HADOOP-1587. Fix TestSymLink to get required system properties.
+ (Devaraj Das via omalley)
+
+138. HADOOP-1628. Add block CRC protocol unit tests. (Raghu Angadi via omalley)
+
+139. HADOOP-1653. FSDirectory code-cleanups. FSDirectory.INode
+ becomes a static class. (Christophe Taton via dhruba)
+
+140. HADOOP-1066. Restructure documentation to make more user
+ friendly. (Connie Kleinjans and Jeff Hammerbacher via cutting)
+
+141. HADOOP-1551. libhdfs supports setting replication factor and
+ retrieving modification time of files. (Sameer Paranjpye via dhruba)
+
+141. HADOOP-1647. FileSystem.getFileStatus returns valid values for "/".
+ (Dhruba Borthakur via dhruba)
+
+142. HADOOP-1657. Fix NNBench to ensure that the block size is a
+ multiple of bytes.per.checksum. (Raghu Angadi via dhruba)
+
+143. HADOOP-1553. Replace user task output and log capture code to use shell
+     redirection instead of copier threads in the TaskTracker. Capping the
+     size of the output is now done via an in-memory tail, so the cap should
+     not be large. The output of the tasklog servlet is no longer forced into
+     UTF-8 and is no longer buffered entirely in memory. (omalley)
+ Configuration changes to hadoop-default.xml:
+ remove mapred.userlog.num.splits
+ remove mapred.userlog.purge.splits
+ change default mapred.userlog.limit.kb to 0 (no limit)
+ change default mapred.userlog.retain.hours to 24
+ Configuration changes to log4j.properties:
+ remove log4j.appender.TLA.noKeepSplits
+ remove log4j.appender.TLA.purgeLogSplits
+ remove log4j.appender.TLA.logsRetainHours
+ URL changes:
+       http://<tasktracker>/tasklog.jsp -> http://<tasktracker>/tasklog with
+ parameters limited to start and end, which may be positive (from
+ start) or negative (from end).
+ Environment:
+ require bash (v2 or later) and tail
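+
+     A sketch of overriding the new per-job defaults named above; the
+     values are illustrative:
+
+       import org.apache.hadoop.mapred.JobConf;
+
+       public class UserlogConfig {
+         public static void main(String[] args) {
+           JobConf conf = new JobConf(UserlogConfig.class);
+           conf.setInt("mapred.userlog.limit.kb", 256);    // 0 = no limit
+           conf.setInt("mapred.userlog.retain.hours", 12); // default 24
+         }
+       }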
+
+144. HADOOP-1659. Fix a job id/job name mixup. (Arun C. Murthy via omalley)
+
+145. HADOOP-1665. With HDFS Trash enabled, when the same file is created
+     and deleted more than once, the succeeding deletions create Trash item
+     names suffixed with an integer. (Dhruba Borthakur via dhruba)
+
+146. HADOOP-1666. FsShell object can be used for multiple fs commands.
+ (Dhruba Borthakur via dhruba)
+
+147. HADOOP-1654. Remove performance regression introduced by Block CRC.
+ (Raghu Angadi via dhruba)
+
+148. HADOOP-1680. Improvements to Block CRC upgrade messages.
+ (Raghu Angadi via dhruba)
+
+149. HADOOP-71. Allow Text and SequenceFile Map/Reduce inputs from non-default
+ filesystems. (omalley)
+
+150. HADOOP-1568. Expose HDFS as xml/http filesystem to provide cross-version
+     compatibility. (Chris Douglas via omalley)
+
+151. HADOOP-1668. Added an INCOMPATIBILITY section to CHANGES.txt. (nigel)
+
+152. HADOOP-1629. Added an upgrade test for HADOOP-1134.
+ (Raghu Angadi via nigel)
+
+153. HADOOP-1698. Fix performance problems on map output sorting for jobs
+ with large numbers of reduces. (Devaraj Das via omalley)
+
+154. HADOOP-1716. Fix a Pipes wordcount example to remove the 'file:'
+ schema from its output path. (omalley via cutting)
+
+155. HADOOP-1714. Fix TestDFSUpgradeFromImage to work on Windows.
+ (Raghu Angadi via nigel)
+
+156. HADOOP-1663. Return a non-zero exit code if streaming fails. (Lohit Renu
+ via omalley)
+
+157. HADOOP-1712. Fix an unhandled exception on datanode during block
+ CRC upgrade. (Raghu Angadi via cutting)
+
+158. HADOOP-1717. Fix TestDFSUpgradeFromImage to work on Solaris.
+ (nigel via cutting)
+
+159. HADOOP-1437. Add Eclipse plugin in contrib.
+ (Eugene Hung and Christophe Taton via cutting)
+
+
+Release 0.13.0 - 2007-06-08
+
+ 1. HADOOP-1047. Fix TestReplication to succeed more reliably.
+ (Hairong Kuang via cutting)
+
+ 2. HADOOP-1063. Fix a race condition in MiniDFSCluster test code.
+ (Hairong Kuang via cutting)
+
+ 3. HADOOP-1101. In web ui, split shuffle statistics from reduce
+ statistics, and add some task averages. (Devaraj Das via cutting)
+
+ 4. HADOOP-1071. Improve handling of protocol version mismatch in
+ JobTracker. (Tahir Hashmi via cutting)
+
+ 5. HADOOP-1116. Increase heap size used for contrib unit tests.
+ (Philippe Gassmann via cutting)
+
+ 6. HADOOP-1120. Add contrib/data_join, tools to simplify joining
+ data from multiple sources using MapReduce. (Runping Qi via cutting)
+
+ 7. HADOOP-1064. Reduce log level of some DFSClient messages.
+ (Dhruba Borthakur via cutting)
+
+ 8. HADOOP-1137. Fix StatusHttpServer to work correctly when
+ resources are in a jar file. (Benjamin Reed via cutting)
+
+ 9. HADOOP-1094. Optimize generated Writable implementations for
+ records to not allocate a new BinaryOutputArchive or
+ BinaryInputArchive per call. (Milind Bhandarkar via cutting)
+
+10. HADOOP-1068. Improve error message for clusters with 0 datanodes.
+ (Dhruba Borthakur via tomwhite)
+
+11. HADOOP-1122. Fix divide-by-zero exception in FSNamesystem
+ chooseTarget method. (Dhruba Borthakur via tomwhite)
+
+12. HADOOP-1131. Add a closeAll() static method to FileSystem.
+ (Philippe Gassmann via tomwhite)
+
+13. HADOOP-1085. Improve port selection in HDFS and MapReduce test
+ code. Ports are now selected by the OS during testing rather than
+ by probing for free ports, improving test reliability.
+ (Arun C Murthy via cutting)
+
+14. HADOOP-1153. Fix HDFS daemons to correctly stop their threads.
+ (Konstantin Shvachko via cutting)
+
+15. HADOOP-1146. Add a counter for reduce input keys and rename the
+ "reduce input records" counter to be "reduce input groups".
+ (David Bowen via cutting)
+
+16. HADOOP-1165. In records, replace identical generated toString
+ methods with a method on the base class. (Milind Bhandarkar via cutting)
+
+17. HADOOP-1164. Fix TestReplicationPolicy to specify port zero, so
+ that a free port is automatically selected. (omalley via cutting)
+
+18. HADOOP-1166. Add a NullOutputFormat and use it in the
+ RandomWriter example. (omalley via cutting)
+
+19. HADOOP-1169. Fix a cut/paste error in CopyFiles utility so that
+ S3-based source files are correctly copied. (Michael Stack via cutting)
+
+20. HADOOP-1167. Remove extra synchronization in InMemoryFileSystem.
+ (omalley via cutting)
+
+21. HADOOP-1110. Fix an off-by-one error counting map inputs.
+ (David Bowen via cutting)
+
+22. HADOOP-1178. Fix a NullPointerException during namenode startup.
+ (Dhruba Borthakur via cutting)
+
+23. HADOOP-1011. Fix a ConcurrentModificationException when viewing
+ job history. (Tahir Hashmi via cutting)
+
+24. HADOOP-672. Improve help for fs shell commands.
+ (Dhruba Borthakur via cutting)
+
+25. HADOOP-1170. Improve datanode performance by removing device
+ checks from common operations. (Igor Bolotin via cutting)
+
+26. HADOOP-1090. Fix SortValidator's detection of whether the input
+ file belongs to the sort-input or sort-output directory.
+ (Arun C Murthy via tomwhite)
+
+27. HADOOP-1081. Fix bin/hadoop on Darwin. (Michael Bieniosek via cutting)
+
+28. HADOOP-1045. Add contrib/hbase, a BigTable-like online database.
+ (Jim Kellerman via cutting)
+
+29. HADOOP-1156. Fix a NullPointerException in MiniDFSCluster.
+ (Hairong Kuang via cutting)
+
+30. HADOOP-702. Add tools to help automate HDFS upgrades.
+ (Konstantin Shvachko via cutting)
+
+31. HADOOP-1163. Fix ganglia metrics to aggregate metrics from different
+ hosts properly. (Michael Bieniosek via tomwhite)
+
+32. HADOOP-1194. Make compression style record level for map output
+ compression. (Arun C Murthy via tomwhite)
+
+33. HADOOP-1187. Improve DFS Scalability: avoid scanning entire list of
+ datanodes in getAdditionalBlocks. (Dhruba Borthakur via tomwhite)
+
+34. HADOOP-1133. Add tool to analyze and debug namenode on a production
+ cluster. (Dhruba Borthakur via tomwhite)
+
+35. HADOOP-1151. Remove spurious printing to stderr in streaming
+ PipeMapRed. (Koji Noguchi via tomwhite)
+
+36. HADOOP-988. Change namenode to use a single map of blocks to metadata.
+ (Raghu Angadi via tomwhite)
+
+37. HADOOP-1203. Change UpgradeUtilities used by DFS tests to use
+ MiniDFSCluster to start and stop NameNode/DataNodes.
+ (Nigel Daley via tomwhite)
+
+38. HADOOP-1217. Add test.timeout property to build.xml, so that
+ long-running unit tests may be automatically terminated.
+ (Nigel Daley via cutting)
+
+39. HADOOP-1149. Improve DFS Scalability: make
+ processOverReplicatedBlock() a no-op if blocks are not
+ over-replicated. (Raghu Angadi via tomwhite)
+
+40. HADOOP-1149. Improve DFS Scalability: optimize getDistance(),
+ contains(), and isOnSameRack() in NetworkTopology.
+ (Hairong Kuang via tomwhite)
+
+41. HADOOP-1218. Make synchronization on TaskTracker's RunningJob
+ object consistent. (Devaraj Das via tomwhite)
+
+42. HADOOP-1219. Ignore progress report once a task has reported as
+ 'done'. (Devaraj Das via tomwhite)
+
+43. HADOOP-1114. Permit user to specify additional CLASSPATH elements
+ with a HADOOP_CLASSPATH environment variable. (cutting)
+
+44. HADOOP-1198. Remove ipc.client.timeout parameter override from
+ unit test configuration. Using the default is more robust and
+ has almost the same run time. (Arun C Murthy via tomwhite)
+
+45. HADOOP-1211. Remove deprecated constructor and unused static
+ members in DataNode class. (Konstantin Shvachko via tomwhite)
+
+46. HADOOP-1136. Fix ArrayIndexOutOfBoundsException in
+ FSNamesystem$UnderReplicatedBlocks add() method.
+ (Hairong Kuang via tomwhite)
+
+47. HADOOP-978. Add the client name and the address of the node that
+ previously started to create the file to the description of
+ AlreadyBeingCreatedException. (Konstantin Shvachko via tomwhite)
+
+48. HADOOP-1001. Check the type of keys and values generated by the
+ mapper against the types specified in JobConf.
+ (Tahir Hashmi via tomwhite)
+
+49. HADOOP-971. Improve DFS Scalability: Improve name node performance
+ by adding a hostname to datanodes map. (Hairong Kuang via tomwhite)
+
+50. HADOOP-1189. Fix 'No space left on device' exceptions on datanodes.
+ (Raghu Angadi via tomwhite)
+
+51. HADOOP-819. Change LineRecordWriter to not insert a tab between
+ key and value when either is null, and to print nothing when both
+ are null. (Runping Qi via cutting)
+
+52. HADOOP-1204. Rename InputFormatBase to be FileInputFormat, and
+ deprecate InputFormatBase. Also make LineRecordReader easier to
+ extend. (Runping Qi via cutting)
+
+53. HADOOP-1213. Improve logging of errors by IPC server, to
+ consistently include the service name and the call. (cutting)
+
+54. HADOOP-1238. Fix metrics reporting by TaskTracker to correctly
+ track maps_running and reduces_running.
+ (Michael Bieniosek via cutting)
+
+55. HADOOP-1093. Fix a race condition in HDFS where blocks were
+ sometimes erased before they were reported written.
+ (Dhruba Borthakur via cutting)
+
+56. HADOOP-1239. Add a package name to some testjar test classes.
+ (Jim Kellerman via cutting)
+
+57. HADOOP-1241. Fix NullPointerException in processReport when
+ namenode is restarted. (Dhruba Borthakur via tomwhite)
+
+58. HADOOP-1244. Fix stop-dfs.sh to no longer incorrectly specify
+ slaves file for stopping datanode.
+ (Michael Bieniosek via tomwhite)
+
+59. HADOOP-1253. Fix ConcurrentModificationException and
+ NullPointerException in JobControl.
+     (Johan Oskarsson via tomwhite)
+
+60. HADOOP-1256. Fix NameNode so that multiple DataNodeDescriptors
+ can no longer be created on startup. (Hairong Kuang via cutting)
+
+61. HADOOP-1214. Replace streaming classes with new counterparts
+ from Hadoop core. (Runping Qi via tomwhite)
+
+62. HADOOP-1250. Move a chmod utility from streaming to FileUtil.
+ (omalley via cutting)
+
+63. HADOOP-1258. Fix TestCheckpoint test case to wait for
+ MiniDFSCluster to be active. (Nigel Daley via tomwhite)
+
+64. HADOOP-1148. Re-indent all Java source code to consistently use
+ two spaces per indent level. (cutting)
+
+65. HADOOP-1251. Add a method to Reporter to get the map InputSplit.
+ (omalley via cutting)
+
+66. HADOOP-1224. Fix "Browse the filesystem" link to no longer point
+ to dead datanodes. (Enis Soztutar via tomwhite)
+
+67. HADOOP-1154. Fail a streaming task if the threads reading from or
+ writing to the streaming process fail. (Koji Noguchi via tomwhite)
+
+68. HADOOP-968. Move shuffle and sort to run in reduce's child JVM,
+ rather than in TaskTracker. (Devaraj Das via cutting)
+
+69. HADOOP-1111. Add support for client notification of job
+ completion. If the job configuration has a job.end.notification.url
+     property it will make an HTTP GET request to the specified URL.
+ The number of retries and the interval between retries is also
+ configurable. (Alejandro Abdelnur via tomwhite)
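+
+     A minimal sketch; the endpoint is hypothetical, and the
+     $jobId/$jobStatus substitution and the job.end.retry.* property names
+     are assumptions:
+
+       import org.apache.hadoop.mapred.JobConf;
+
+       public class NotifyDemo {
+         public static void main(String[] args) {
+           JobConf conf = new JobConf(NotifyDemo.class);
+           conf.set("job.end.notification.url",
+               "http://example.com/done?id=$jobId&status=$jobStatus");
+           conf.set("job.end.retry.attempts", "3");     // assumed name
+           conf.set("job.end.retry.interval", "30000"); // assumed name
+         }
+       }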
+
+70. HADOOP-1275. Fix misspelled job notification property in
+ hadoop-default.xml. (Alejandro Abdelnur via tomwhite)
+
+71. HADOOP-1152. Fix race condition in MapOutputCopier.copyOutput file
+ rename causing possible reduce task hang.
+ (Tahir Hashmi via tomwhite)
+
+72. HADOOP-1050. Distinguish between failed and killed tasks so as to
+ not count a lost tasktracker against the job.
+ (Arun C Murthy via tomwhite)
+
+73. HADOOP-1271. Fix StreamBaseRecordReader to be able to log record
+ data that's not UTF-8. (Arun C Murthy via tomwhite)
+
+74. HADOOP-1190. Fix unchecked warnings in main Hadoop code.
+ (tomwhite)
+
+75. HADOOP-1127. Fix AlreadyBeingCreatedException in namenode for
+ jobs run with speculative execution.
+ (Arun C Murthy via tomwhite)
+
+76. HADOOP-1282. Omnibus HBase patch. Improved tests & configuration.
+ (Jim Kellerman via cutting)
+
+77. HADOOP-1262. Make dfs client try to read from a different replica
+ of the checksum file when a checksum error is detected.
+ (Hairong Kuang via tomwhite)
+
+78. HADOOP-1279. Fix JobTracker to maintain list of recently
+ completed jobs by order of completion, not submission.
+ (Arun C Murthy via cutting)
+
+79. HADOOP-1284. In contrib/streaming, permit flexible specification
+ of field delimiter and fields for partitioning and sorting.
+ (Runping Qi via cutting)
+
+80. HADOOP-1176. Fix a bug where reduce would hang when a map had
+ more than 2GB of output for it. (Arun C Murthy via cutting)
+
+81. HADOOP-1293. Fix contrib/streaming to print more than the first
+ twenty lines of standard error. (Koji Noguchi via cutting)
+
+82. HADOOP-1297. Fix datanode so that requests to remove blocks that
+ do not exist no longer causes block reports to be re-sent every
+ second. (Dhruba Borthakur via cutting)
+
+83. HADOOP-1216. Change MapReduce so that, when numReduceTasks is
+ zero, map outputs are written directly as final output, skipping
+ shuffle, sort and reduce. Use this to implement reduce=NONE
+ option in contrib/streaming. (Runping Qi via cutting)
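+
+     A minimal sketch of a map-only job under this change:
+
+       import org.apache.hadoop.mapred.JobClient;
+       import org.apache.hadoop.mapred.JobConf;
+
+       public class MapOnlyJob {
+         public static void main(String[] args) throws Exception {
+           JobConf conf = new JobConf(MapOnlyJob.class);
+           conf.setNumReduceTasks(0); // skip shuffle, sort and reduce
+           // ... set mapper, input and output paths as usual ...
+           JobClient.runJob(conf);
+         }
+       }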
+
+84. HADOOP-1294. Fix unchecked warnings in main Hadoop code under
+ Java 6. (tomwhite)
+
+85. HADOOP-1299. Fix so that RPC will restart after RPC.stopClient()
+ has been called. (Michael Stack via cutting)
+
+86. HADOOP-1278. Improve blacklisting of TaskTrackers by JobTracker,
+ to reduce false positives. (Arun C Murthy via cutting)
+
+87. HADOOP-1290. Move contrib/abacus into mapred/lib/aggregate.
+ (Runping Qi via cutting)
+
+88. HADOOP-1272. Extract inner classes from FSNamesystem into separate
+ classes. (Dhruba Borthakur via tomwhite)
+
+89. HADOOP-1247. Add support to contrib/streaming for aggregate
+ package, formerly called Abacus. (Runping Qi via cutting)
+
+90. HADOOP-1061. Fix bug in listing files in the S3 filesystem.
+ NOTE: this change is not backwards compatible! You should use the
+ MigrationTool supplied to migrate existing S3 filesystem data to
+     the new format. Please back up your data first before upgrading
+ (using 'hadoop distcp' for example). (tomwhite)
+
+91. HADOOP-1304. Make configurable the maximum number of task
+ attempts before a job fails. (Devaraj Das via cutting)
+
+92. HADOOP-1308. Use generics to restrict types when classes are
+ passed as parameters to JobConf methods. (Michael Bieniosek via cutting)
+
+93. HADOOP-1312. Fix a ConcurrentModificationException in NameNode
+ that killed the heartbeat monitoring thread.
+ (Dhruba Borthakur via cutting)
+
+94. HADOOP-1315. Clean up contrib/streaming, switching it to use core
+ classes more and removing unused code. (Runping Qi via cutting)
+
+95. HADOOP-485. Allow a different comparator for grouping keys in
+ calls to reduce. (Tahir Hashmi via cutting)
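+
+     A sketch, assuming the setter is named
+     setOutputValueGroupingComparator(); the comparator is a hypothetical
+     example that groups Text keys by their first tab-separated token:
+
+       import org.apache.hadoop.io.Text;
+       import org.apache.hadoop.io.WritableComparable;
+       import org.apache.hadoop.io.WritableComparator;
+       import org.apache.hadoop.mapred.JobConf;
+
+       public class GroupingDemo {
+         public static class FirstTokenComparator extends WritableComparator {
+           public FirstTokenComparator() { super(Text.class); }
+           public int compare(WritableComparable a, WritableComparable b) {
+             String x = a.toString().split("\t", 2)[0];
+             String y = b.toString().split("\t", 2)[0];
+             return x.compareTo(y);
+           }
+         }
+
+         public static void main(String[] args) {
+           JobConf conf = new JobConf(GroupingDemo.class);
+           conf.setOutputValueGroupingComparator(FirstTokenComparator.class);
+         }
+       }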
+
+96. HADOOP-1322. Fix TaskTracker blacklisting to work correctly in
+ one- and two-node clusters. (Arun C Murthy via cutting)
+
+97. HADOOP-1144. Permit one to specify a maximum percentage of tasks
+ that can fail before a job is aborted. The default is zero.
+ (Arun C Murthy via cutting)
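+
+     A minimal sketch, assuming the JobConf setters are named
+     setMaxMapTaskFailuresPercent() and setMaxReduceTaskFailuresPercent():
+
+       import org.apache.hadoop.mapred.JobConf;
+
+       public class LenientJob {
+         public static void main(String[] args) {
+           JobConf conf = new JobConf(LenientJob.class);
+           conf.setMaxMapTaskFailuresPercent(5);    // tolerate 5% failed maps
+           conf.setMaxReduceTaskFailuresPercent(0); // default: none
+         }
+       }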
+
+98. HADOOP-1184. Fix HDFS decommissioning to complete when the only
+ copy of a block is on a decommissioned node. (Dhruba Borthakur via cutting)
+
+99. HADOOP-1263. Change DFSClient to retry certain namenode calls
+ with a random, exponentially increasing backoff time, to avoid
+ overloading the namenode on, e.g., job start. (Hairong Kuang via cutting)
+
+100. HADOOP-1325. First complete, functioning version of HBase.
+ (Jim Kellerman via cutting)
+
+101. HADOOP-1276. Make tasktracker expiry interval configurable.
+ (Arun C Murthy via cutting)
+
+102. HADOOP-1326. Change JobClient#RunJob() to return the job.
+ (omalley via cutting)
+
+103. HADOOP-1270. Randomize the fetch of map outputs, speeding the
+ shuffle. (Arun C Murthy via cutting)
+
+104. HADOOP-1200. Restore disk checking lost in HADOOP-1170.
+ (Hairong Kuang via cutting)
+
+105. HADOOP-1252. Changed MapReduce's allocation of local files to
+ use round-robin among available devices, rather than a hashcode.
+ More care is also taken to not allocate files on full or offline
+ drives. (Devaraj Das via cutting)
+
+106. HADOOP-1324. Change so that an FSError kills only the task that
+ generates it rather than the entire task tracker.
+ (Arun C Murthy via cutting)
+
+107. HADOOP-1310. Fix unchecked warnings in aggregate code. (tomwhite)
+
+108. HADOOP-1255. Fix a bug where the namenode falls into an infinite
+ loop trying to remove a dead node. (Hairong Kuang via cutting)
+
+109. HADOOP-1160. Fix DistributedFileSystem.close() to close the
+ underlying FileSystem, correctly aborting files being written.
+ (Hairong Kuang via cutting)
+
+110. HADOOP-1341. Fix intermittent failures in HBase unit tests
+ caused by deadlock. (Jim Kellerman via cutting)
+
+111. HADOOP-1350. Fix shuffle performance problem caused by forcing
+ chunked encoding of map outputs. (Devaraj Das via cutting)
+
+112. HADOOP-1345. Fix HDFS to correctly retry another replica when a
+ checksum error is encountered. (Hairong Kuang via cutting)
+
+113. HADOOP-1205. Improve synchronization around HDFS block map.
+ (Hairong Kuang via cutting)
+
+114. HADOOP-1353. Fix a potential NullPointerException in namenode.
+ (Dhruba Borthakur via cutting)
+
+115. HADOOP-1354. Fix a potential NullPointerException in FsShell.
+ (Hairong Kuang via cutting)
+
+116. HADOOP-1358. Fix a potential bug when DFSClient calls skipBytes.
+ (Hairong Kuang via cutting)
+
+117. HADOOP-1356. Fix a bug in ValueHistogram. (Runping Qi via cutting)
+
+118. HADOOP-1363. Fix locking bug in JobClient#waitForCompletion().
+ (omalley via cutting)
+
+119. HADOOP-1368. Fix inconsistent synchronization in JobInProgress.
+ (omalley via cutting)
+
+120. HADOOP-1369. Fix inconsistent synchronization in TaskTracker.
+ (omalley via cutting)
+
+121. HADOOP-1361. Fix various calls to skipBytes() to check return
+ value. (Hairong Kuang via cutting)
+
+122. HADOOP-1388. Fix a potential NullPointerException in web ui.
+ (Devaraj Das via cutting)
+
+123. HADOOP-1385. Fix MD5Hash#hashCode() to generally hash to more
+ than 256 values. (omalley via cutting)
+
+124. HADOOP-1386. Fix Path to not permit the empty string as a
+     path, as this has led to accidental file deletion. Instead
+ force applications to use "." to name the default directory.
+ (Hairong Kuang via cutting)
+
+125. HADOOP-1407. Fix integer division bug in JobInProgress which
+ meant failed tasks didn't cause the job to fail.
+ (Arun C Murthy via tomwhite)
+
+126. HADOOP-1427. Fix a typo that caused GzipCodec to incorrectly use
+ a very small input buffer. (Espen Amble Kolstad via cutting)
+
+127. HADOOP-1435. Fix globbing code to no longer use the empty string
+ to indicate the default directory, per HADOOP-1386.
+ (Hairong Kuang via cutting)
+
+128. HADOOP-1411. Make task retry framework handle
+ AlreadyBeingCreatedException when wrapped as a RemoteException.
+ (Hairong Kuang via tomwhite)
+
+129. HADOOP-1242. Improve handling of DFS upgrades.
+ (Konstantin Shvachko via cutting)
+
+130. HADOOP-1332. Fix so that TaskTracker exits reliably during unit
+ tests on Windows. (omalley via cutting)
+
+131. HADOOP-1431. Fix so that sort progress reporting during map runs
+ only while sorting, so that stuck maps are correctly terminated.
+ (Devaraj Das and Arun C Murthy via cutting)
+
+132. HADOOP-1452. Change TaskTracker.MapOutputServlet.doGet.totalRead
+ to a long, permitting map outputs to exceed 2^31 bytes.
+ (omalley via cutting)
+
+133. HADOOP-1443. Fix a bug opening zero-length files in HDFS.
+ (Konstantin Shvachko via cutting)
+
+
+Release 0.12.3 - 2007-04-06
+
+ 1. HADOOP-1162. Fix bug in record CSV and XML serialization of
+ binary values. (Milind Bhandarkar via cutting)
+
+ 2. HADOOP-1123. Fix NullPointerException in LocalFileSystem when
+ trying to recover from a checksum error.
+ (Hairong Kuang & Nigel Daley via tomwhite)
+
+ 3. HADOOP-1177. Fix bug where IOException in MapOutputLocation.getFile
+ was not being logged. (Devaraj Das via tomwhite)
+
+ 4. HADOOP-1175. Fix bugs in JSP for displaying a task's log messages.
+ (Arun C Murthy via cutting)
+
+ 5. HADOOP-1191. Fix map tasks to wait until sort progress thread has
+ stopped before reporting the task done. (Devaraj Das via cutting)
+
+ 6. HADOOP-1192. Fix an integer overflow bug in FSShell's 'dus'
+ command and a performance problem in HDFS's implementation of it.
+ (Hairong Kuang via cutting)
+
+ 7. HADOOP-1105. Fix reducers to make "progress" while iterating
+ through values. (Devaraj Das & Owen O'Malley via tomwhite)
+
+ 8. HADOOP-1179. Make Task Tracker close index file as soon as the read
+ is done when serving get-map-output requests.
+ (Devaraj Das via tomwhite)
+
+
+Release 0.12.2 - 2007-03-23
+
+ 1. HADOOP-1135. Fix bug in block report processing which may cause
+ the namenode to delete blocks. (Dhruba Borthakur via tomwhite)
+
+ 2. HADOOP-1145. Make XML serializer and deserializer classes public
+ in record package. (Milind Bhandarkar via cutting)
+
+ 3. HADOOP-1140. Fix a deadlock in metrics. (David Bowen via cutting)
+
+ 4. HADOOP-1150. Fix streaming -reducer and -mapper to give them
+ defaults. (Owen O'Malley via tomwhite)
+
+
+Release 0.12.1 - 2007-03-17
+
+ 1. HADOOP-1035. Fix a StackOverflowError in FSDataSet.
+ (Raghu Angadi via cutting)
+
+ 2. HADOOP-1053. Fix VInt representation of negative values. Also
+ remove references in generated record code to methods outside of
+ the record package and improve some record documentation.
+ (Milind Bhandarkar via cutting)
+
+ 3. HADOOP-1067. Compile fails if Checkstyle jar is present in lib
+ directory. Also remove dependency on a particular Checkstyle
+ version number. (tomwhite)
+
+ 4. HADOOP-1060. Fix an IndexOutOfBoundsException in the JobTracker
+ that could cause jobs to hang. (Arun C Murthy via cutting)
+
+ 5. HADOOP-1077. Fix a race condition fetching map outputs that could
+ hang reduces. (Devaraj Das via cutting)
+
+ 6. HADOOP-1083. Fix so that when a cluster restarts with a missing
+ datanode, its blocks are replicated. (Hairong Kuang via cutting)
+
+ 7. HADOOP-1082. Fix a NullPointerException in ChecksumFileSystem.
+ (Hairong Kuang via cutting)
+
+ 8. HADOOP-1088. Fix record serialization of negative values.
+ (Milind Bhandarkar via cutting)
+
+ 9. HADOOP-1080. Fix bug in bin/hadoop on Windows when native
+ libraries are present. (ab via cutting)
+
+10. HADOOP-1091. Fix a NullPointerException in MetricsRecord.
+ (David Bowen via tomwhite)
+
+11. HADOOP-1092. Fix a NullPointerException in HeartbeatMonitor
+ thread. (Hairong Kuang via tomwhite)
+
+12. HADOOP-1112. Fix a race condition in Hadoop metrics.
+ (David Bowen via tomwhite)
+
+13. HADOOP-1108. Checksummed file system should retry reading if a
+ different replica is found when handling ChecksumException.
+ (Hairong Kuang via tomwhite)
+
+14. HADOOP-1070. Fix a problem with number of racks and datanodes
+ temporarily doubling. (Konstantin Shvachko via tomwhite)
+
+15. HADOOP-1099. Fix NullPointerException in JobInProgress.
+ (Gautam Kowshik via tomwhite)
+
+16. HADOOP-1115. Fix bug where FsShell copyToLocal doesn't
+ copy directories. (Hairong Kuang via tomwhite)
+
+17. HADOOP-1109. Fix NullPointerException in StreamInputFormat.
+ (Koji Noguchi via tomwhite)
+
+18. HADOOP-1117. Fix DFS scalability: when the namenode is
+ restarted it consumes 80% CPU. (Dhruba Borthakur via
+ tomwhite)
+
+19. HADOOP-1089. Make the C++ version of write and read v-int
+ agree with the Java versions. (Milind Bhandarkar via
+ tomwhite)
+
+20. HADOOP-1096. Rename InputArchive and OutputArchive and
+ make them public. (Milind Bhandarkar via tomwhite)
+
+21. HADOOP-1128. Fix missing progress information in map tasks.
+ (Espen Amble Kolstad, Andrzej Bialecki, and Owen O'Malley
+ via tomwhite)
+
+22. HADOOP-1129. Fix DFSClient to not hide IOExceptions in
+ flush method. (Hairong Kuang via tomwhite)
+
+23. HADOOP-1126. Optimize CPU usage for under replicated blocks
+ when cluster restarts. (Hairong Kuang via tomwhite)
+
+
+Release 0.12.0 - 2007-03-02
+
+ 1. HADOOP-975. Separate stdout and stderr from tasks.
+ (Arun C Murthy via cutting)
+
+ 2. HADOOP-982. Add some setters and a toString() method to
+ BytesWritable. (omalley via cutting)
+
+ 3. HADOOP-858. Move contrib/smallJobsBenchmark to src/test, removing
+ obsolete bits. (Nigel Daley via cutting)
+
+ 4. HADOOP-992. Fix MiniMR unit tests to use MiniDFS when specified,
+ rather than the local FS. (omalley via cutting)
+
+ 5. HADOOP-954. Change use of metrics to use callback mechanism.
+ Also rename utility class Metrics to MetricsUtil.
+ (David Bowen & Nigel Daley via cutting)
+
+ 6. HADOOP-893. Improve HDFS client's handling of dead datanodes.
+ The set is no longer reset with each block, but rather is now
+ maintained for the life of an open file. (Raghu Angadi via cutting)
+
+ 7. HADOOP-882. Upgrade to jets3t version 0.5, used by the S3
+ FileSystem. This version supports retries. (Michael Stack via cutting)
+
+ 8. HADOOP-977. Send task's stdout and stderr to JobClient's stdout
+ and stderr respectively, with each line tagged by the task's name.
+ (Arun C Murthy via cutting)
+
+ 9. HADOOP-761. Change unit tests to not use /tmp. (Nigel Daley via cutting)
+
+10. HADOOP-1007. Make names of metrics used in Hadoop unique.
+ (Nigel Daley via cutting)
+
+11. HADOOP-491. Change mapred.task.timeout to be per-job, and make a
+ value of zero mean no timeout. Also change contrib/streaming to
+ disable task timeouts. (Arun C Murthy via cutting)
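+
+    A minimal sketch; the value is assumed to be in milliseconds:
+
+      import org.apache.hadoop.mapred.JobConf;
+
+      public class TimeoutDemo {
+        public static void main(String[] args) {
+          JobConf conf = new JobConf(TimeoutDemo.class);
+          conf.set("mapred.task.timeout", "0"); // 0 disables the timeout
+        }
+      }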
+
+12. HADOOP-1010. Add Reporter.NULL, a Reporter implementation that
+ does nothing. (Runping Qi via cutting)
+
+13. HADOOP-923. In HDFS NameNode, move replication computation to a
+ separate thread, to improve heartbeat processing time.
+ (Dhruba Borthakur via cutting)
+
+14. HADOOP-476. Rewrite contrib/streaming command-line processing,
+ improving parameter validation. (Sanjay Dahiya via cutting)
+
+15. HADOOP-973. Improve error messages in Namenode. This should help
+ to track down a problem that was appearing as a
+ NullPointerException. (Dhruba Borthakur via cutting)
+
+16. HADOOP-649. Fix so that jobs with no tasks are not lost.
+ (Thomas Friol via cutting)
+
+17. HADOOP-803. Reduce memory use by HDFS namenode, phase I.
+ (Raghu Angadi via cutting)
+
+18. HADOOP-1021. Fix MRCaching-based unit tests on Windows.
+ (Nigel Daley via cutting)
+
+19. HADOOP-889. Remove duplicate code from HDFS unit tests.
+ (Milind Bhandarkar via cutting)
+
+20. HADOOP-943. Improve HDFS's fsck command to display the filename
+ for under-replicated blocks. (Dhruba Borthakur via cutting)
+
+21. HADOOP-333. Add validator for sort benchmark output.
+ (Arun C Murthy via cutting)
+
+22. HADOOP-947. Improve performance of datanode decommissioning.
+ (Dhruba Borthakur via cutting)
+
+23. HADOOP-442. Permit one to specify hosts allowed to connect to
+ namenode and jobtracker with include and exclude files. (Wendy
+ Chien via cutting)
+
+24. HADOOP-1017. Cache constructors, for improved performance.
+ (Ron Bodkin via cutting)
+
+25. HADOOP-867. Move split creation out of JobTracker to client.
+ Splits are now saved in a separate file, read by task processes
+ directly, so that user code is no longer required in the
+ JobTracker. (omalley via cutting)
+
+26. HADOOP-1006. Remove obsolete '-local' option from test code.
+ (Gautam Kowshik via cutting)
+
+27. HADOOP-952. Create a public (shared) Hadoop EC2 AMI.
+ The EC2 scripts now support launch of public AMIs.
+ (tomwhite)
+
+28. HADOOP-1025. Remove some obsolete code in ipc.Server. (cutting)
+
+29. HADOOP-997. Implement S3 retry mechanism for failed block
+ transfers. This includes a generic retry mechanism for use
+ elsewhere in Hadoop. (tomwhite)
+
+30. HADOOP-990. Improve HDFS support for full datanode volumes.
+ (Raghu Angadi via cutting)
+
+31. HADOOP-564. Replace uses of "dfs://" URIs with the more standard
+ "hdfs://". (Wendy Chien via cutting)
+
+32. HADOOP-1030. In unit tests, unify setting of ipc.client.timeout.
+ Also increase the value used from one to two seconds, in hopes of
+ making tests complete more reliably. (cutting)
+
+33. HADOOP-654. Stop assigning tasks to a tasktracker if it has
+ failed more than a specified number of times in the job.
+ (Arun C Murthy via cutting)
+
+34. HADOOP-985. Change HDFS to identify nodes by IP address rather
+ than by DNS hostname. (Raghu Angadi via cutting)
+
+35. HADOOP-248. Optimize location of map outputs to not use random
+ probes. (Devaraj Das via cutting)
+
+36. HADOOP-1029. Fix streaming's input format to correctly seek to
+ the start of splits. (Arun C Murthy via cutting)
+
+37. HADOOP-492. Add per-job and per-task counters. These are
+ incremented via the Reporter interface and available through the
+ web ui and the JobClient API. The mapreduce framework maintains a
+ few basic counters, and applications may add their own. Counters
+ are also passed to the metrics system.
+ (David Bowen via cutting)
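+
+ A minimal sketch of the new counter API from inside an old-style
+ Mapper.map() implementation (the enum, the parse() helper, and the
+ method name incrCounter are illustrative assumptions):
+
+    public enum RecordCounters { PARSED, MALFORMED }
+
+    public void map(WritableComparable key, Writable value,
+                    OutputCollector output, Reporter reporter)
+        throws IOException {
+      // Counters are grouped under their declaring enum in the web ui.
+      if (parse(value)) {
+        reporter.incrCounter(RecordCounters.PARSED, 1);
+      } else {
+        reporter.incrCounter(RecordCounters.MALFORMED, 1);
+      }
+    }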
+
+38. HADOOP-1034. Fix datanode to better log exceptions.
+ (Philippe Gassmann via cutting)
+
+39. HADOOP-878. In contrib/streaming, fix reducer=NONE to work with
+ multiple maps. (Arun C Murthy via cutting)
+
+40. HADOOP-1039. In HDFS's TestCheckpoint, avoid restarting
+ MiniDFSCluster so often, speeding this test. (Dhruba Borthakur via cutting)
+
+41. HADOOP-1040. Update RandomWriter example to use counters and
+ user-defined input and output formats. (omalley via cutting)
+
+42. HADOOP-1027. Fix problems with in-memory merging during shuffle
+ and re-enable this optimization. (Devaraj Das via cutting)
+
+43. HADOOP-1036. Fix exception handling in TaskTracker to keep tasks
+ from being lost. (Arun C Murthy via cutting)
+
+44. HADOOP-1042. Improve the handling of failed map output fetches.
+ (Devaraj Das via cutting)
+
+45. HADOOP-928. Make checksums optional per FileSystem.
+ (Hairong Kuang via cutting)
+
+46. HADOOP-1044. Fix HDFS's TestDecommission to not spuriously fail.
+ (Wendy Chien via cutting)
+
+47. HADOOP-972. Optimize HDFS's rack-aware block placement algorithm.
+ (Hairong Kuang via cutting)
+
+48. HADOOP-1043. Optimize shuffle, increasing parallelism.
+ (Devaraj Das via cutting)
+
+49. HADOOP-940. Improve HDFS's replication scheduling.
+ (Dhruba Borthakur via cutting)
+
+50. HADOOP-1020. Fix a bug in Path resolution, and a problem with
+ unit tests on Windows. (cutting)
+
+51. HADOOP-941. Enhance record facility.
+ (Milind Bhandarkar via cutting)
+
+52. HADOOP-1000. Fix so that log messages in task subprocesses are
+ not written to a task's standard error. (Arun C Murthy via cutting)
+
+53. HADOOP-1037. Fix bin/slaves.sh, which currently only works with
+ /bin/bash, to specify /bin/bash rather than /bin/sh. (cutting)
+
+54. HADOOP-1046. Clean up tmp files from partially received stale blocks. (ab)
+
+55. HADOOP-1041. Optimize mapred counter implementation. Also group
+ counters by their declaring Enum. (David Bowen via cutting)
+
+56. HADOOP-1032. Permit one to specify jars that will be cached
+ across multiple jobs. (Gautam Kowshik via cutting)
+
+57. HADOOP-1051. Add optional checkstyle task to build.xml. To use
+ this developers must download the (LGPL'd) checkstyle jar
+ themselves. (tomwhite via cutting)
+
+58. HADOOP-1049. Fix a race condition in IPC client.
+ (Devaraj Das via cutting)
+
+60. HADOOP-1056. Check HDFS include/exclude node lists with both IP
+ address and hostname. (Wendy Chien via cutting)
+
+61. HADOOP-994. In HDFS, limit the number of blocks invalidated at
+ once. Large lists were causing datanodes to time out.
+ (Dhruba Borthakur via cutting)
+
+62. HADOOP-432. Add a trash feature, disabled by default. When
+ enabled, the FSShell 'rm' command will move things to a trash
+ directory in the filesystem. In HDFS, a thread periodically
+ checkpoints the trash and removes old checkpoints. (cutting)
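+
+ A sketch of enabling it (the property name fs.trash.interval and
+ its unit, minutes between checkpoints, are assumptions here; zero
+ leaves trash disabled):
+
+    <property>
+      <name>fs.trash.interval</name>
+      <value>60</value>
+    </property>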
+
+
+Release 0.11.2 - 2007-02-16
+
+ 1. HADOOP-1009. Fix an infinite loop in the HDFS namenode.
+ (Dhruba Borthakur via cutting)
+
+ 2. HADOOP-1014. Disable in-memory merging during shuffle, as this is
+ causing data corruption. (Devaraj Das via cutting)
+
+
+Release 0.11.1 - 2007-02-09
+
+ 1. HADOOP-976. Make SequenceFile.Metadata public. (Runping Qi via cutting)
+
+ 2. HADOOP-917. Fix a NullPointerException in SequenceFile's merger
+ with large map outputs. (omalley via cutting)
+
+ 3. HADOOP-984. Fix a bug in shuffle error handling introduced by
+ HADOOP-331. If a map output is unavailable, the job tracker is
+ once more informed. (Arun C Murthy via cutting)
+
+ 4. HADOOP-987. Fix a problem in HDFS where blocks were not removed
+ from neededReplications after a replication target was selected.
+ (Hairong Kuang via cutting)
+
+
+Release 0.11.0 - 2007-02-02
+
+ 1. HADOOP-781. Remove methods deprecated in 0.10 that are no longer
+ widely used. (cutting)
+
+ 2. HADOOP-842. Change HDFS protocol so that the open() method is
+ passed the client hostname, to permit the namenode to order block
+ locations on the basis of network topology.
+ (Hairong Kuang via cutting)
+
+ 3. HADOOP-852. Add an ant task to compile record definitions, and
+ use it to compile record unit tests. (Milind Bhandarkar via cutting)
+
+ 4. HADOOP-757. Fix "Bad File Descriptor" exception in HDFS client
+ when an output file is closed twice. (Raghu Angadi via cutting)
+
+ 5. [ intentionally blank ]
+
+ 6. HADOOP-890. Replace dashes in metric names with underscores,
+ for better compatibility with some monitoring systems.
+ (Nigel Daley via cutting)
+
+ 7. HADOOP-801. Add to jobtracker a log of task completion events.
+ (Sanjay Dahiya via cutting)
+
+ 8. HADOOP-855. In HDFS, try to repair files with checksum errors.
+ An exception is still thrown, but corrupt blocks are now removed
+ when they have replicas. (Wendy Chien via cutting)
+
+ 9. HADOOP-886. Reduce number of timer threads created by metrics API
+ by pooling contexts. (Nigel Daley via cutting)
+
+10. HADOOP-897. Add a "javac.args" property to build.xml that permits
+ one to pass arbitrary options to javac. (Milind Bhandarkar via cutting)
+
+11. HADOOP-899. Update libhdfs for changes in HADOOP-871.
+ (Sameer Paranjpye via cutting)
+
+12. HADOOP-905. Remove some dead code from JobClient. (cutting)
+
+13. HADOOP-902. Fix a NullPointerException in HDFS client when
+ closing output streams. (Raghu Angadi via cutting)
+
+14. HADOOP-735. Switch generated record code to use BytesWritable to
+ represent fields of type 'buffer'. (Milind Bhandarkar via cutting)
+
+15. HADOOP-830. Improve mapreduce merge performance by buffering and
+ merging multiple map outputs as they arrive at reduce nodes before
+ they're written to disk. (Devaraj Das via cutting)
+
+16. HADOOP-908. Add a new contrib package, Abacus, that simplifies
+ counting and aggregation, built on MapReduce. (Runping Qi via cutting)
+
+17. HADOOP-901. Add support for recursive renaming to the S3 filesystem.
+ (Tom White via cutting)
+
+18. HADOOP-912. Fix a bug in TaskTracker.isIdle() that was
+ sporadically causing unit test failures. (Arun C Murthy via cutting)
+
+19. HADOOP-909. Fix the 'du' command to correctly compute the size of
+ FileSystem directory trees. (Hairong Kuang via cutting)
+
+20. HADOOP-731. When a checksum error is encountered on a file stored
+ in HDFS, try another replica of the data, if any.
+ (Wendy Chien via cutting)
+
+21. HADOOP-732. Add support to SequenceFile for arbitrary metadata,
+ as a set of attribute value pairs. (Runping Qi via cutting)
+
+22. HADOOP-929. Fix PhasedFileSystem to pass configuration to
+ underlying FileSystem. (Sanjay Dahiya via cutting)
+
+23. HADOOP-935. Fix contrib/abacus to not delete pre-existing output
+ files, but rather to fail in this case. (Runping Qi via cutting)
+
+24. HADOOP-936. More metric renamings, as in HADOOP-890.
+ (Nigel Daley via cutting)
+
+25. HADOOP-856. Fix HDFS's fsck command to not report that
+ non-existent filesystems are healthy. (Milind Bhandarkar via cutting)
+
+26. HADOOP-602. Remove the dependency on Lucene's PriorityQueue
+ utility, by copying it into Hadoop. This facilitates using Hadoop
+ with different versions of Lucene without worrying about CLASSPATH
+ order. (Milind Bhandarkar via cutting)
+
+27. [ intentionally blank ]
+
+28. HADOOP-227. Add support for backup namenodes, which periodically
+ get snapshots of the namenode state. (Dhruba Borthakur via cutting)
+
+29. HADOOP-884. Add scripts in contrib/ec2 to facilitate running
+ Hadoop on an Amazon's EC2 cluster. (Tom White via cutting)
+
+30. HADOOP-937. Change the namenode to request re-registration of
+ datanodes in more circumstances. (Hairong Kuang via cutting)
+
+31. HADOOP-922. Optimize small forward seeks in HDFS. If data is
+ likely already in flight, skip ahead rather than re-opening the
+ block. (Dhruba Borthakur via cutting)
+
+32. HADOOP-961. Add a 'job -events' sub-command that prints job
+ events, including task completions and failures. (omalley via cutting)
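+
+ For example (the job id is invented, and the two trailing
+ arguments are assumed to be a starting event number and a count):
+
+    bin/hadoop job -events job_0001 0 25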
+
+33. HADOOP-959. Fix namenode snapshot code added in HADOOP-227 to
+ work on Windows. (Dhruba Borthakur via cutting)
+
+34. HADOOP-934. Fix TaskTracker to catch metrics exceptions that were
+ causing heartbeats to fail. (Arun Murthy via cutting)
+
+35. HADOOP-881. Fix JobTracker web interface to display the correct
+ number of task failures. (Sanjay Dahiya via cutting)
+
+36. HADOOP-788. Change contrib/streaming to subclass TextInputFormat,
+ permitting it to take advantage of native compression facilities.
+ (Sanjay Dahiya via cutting)
+
+37. HADOOP-962. In contrib/ec2: make scripts executable in tar file;
+ add a README; make the environment file use a template.
+ (Tom White via cutting)
+
+38. HADOOP-549. Fix a NullPointerException in TaskReport's
+ serialization. (omalley via cutting)
+
+39. HADOOP-963. Fix remote exceptions to have the stack trace of the
+ caller thread, not the IPC listener thread. (omalley via cutting)
+
+40. HADOOP-967. Change RPC clients to start sending a version header.
+ (omalley via cutting)
+
+41. HADOOP-964. Fix a bug introduced by HADOOP-830 where jobs failed
+ whose comparators and/or i/o types were in the job's jar.
+ (Dennis Kubes via cutting)
+
+42. HADOOP-969. Fix a deadlock in JobTracker. (omalley via cutting)
+
+43. HADOOP-862. Add support for the S3 FileSystem to the CopyFiles
+ tool. (Michael Stack via cutting)
+
+44. HADOOP-965. Fix IsolationRunner so that job's jar can be found.
+ (Dennis Kubes via cutting)
+
+45. HADOOP-309. Fix two NullPointerExceptions in StatusHttpServer.
+ (navychen via cutting)
+
+46. HADOOP-692. Add rack awareness to HDFS's placement of blocks.
+ (Hairong Kuang via cutting)
+
+
+Release 0.10.1 - 2007-01-10
+
+ 1. HADOOP-857. Fix S3 FileSystem implementation to permit its use
+ for MapReduce input and output. (Tom White via cutting)
+
+ 2. HADOOP-863. Reduce logging verbosity introduced by HADOOP-813.
+ (Devaraj Das via cutting)
+
+ 3. HADOOP-815. Fix memory leaks in JobTracker. (Arun C Murthy via cutting)
+
+ 4. HADOOP-600. Fix a race condition in JobTracker.
+ (Arun C Murthy via cutting)
+
+ 5. HADOOP-864. Fix 'bin/hadoop -jar' to operate correctly when
+ hadoop.tmp.dir does not yet exist. (omalley via cutting)
+
+ 6. HADOOP-866. Fix 'dfs -get' command to remove existing crc files,
+ if any. (Milind Bhandarkar via cutting)
+
+ 7. HADOOP-871. Fix a bug in bin/hadoop setting JAVA_LIBRARY_PATH.
+ (Arun C Murthy via cutting)
+
+ 8. HADOOP-868. Decrease the number of open files during map,
+ respecting io.sort.factor. (Devaraj Das via cutting)
+
+ 9. HADOOP-865. Fix S3 FileSystem so that partially created files can
+ be deleted. (Tom White via cutting)
+
+10. HADOOP-873. Pass java.library.path correctly to child processes.
+ (omalley via cutting)
+
+11. HADOOP-851. Add support for the LZO codec. This is much faster
+ than the default, zlib-based compression, but it is only available
+ when the native library is built. (Arun C Murthy via cutting)
+
+12. HADOOP-880. Fix S3 FileSystem to remove directories.
+ (Tom White via cutting)
+
+13. HADOOP-879. Fix InputFormatBase to handle output generated by
+ MapFileOutputFormat. (cutting)
+
+14. HADOOP-659. In HDFS, prioritize replication of blocks based on
+ current replication level. Blocks which are severely
+ under-replicated should be further replicated before blocks which
+ are less under-replicated. (Hairong Kuang via cutting)
+
+15. HADOOP-726. Deprecate FileSystem locking methods. They are not
+ currently usable. Locking should eventually be provided as an
+ independent service. (Raghu Angadi via cutting)
+
+16. HADOOP-758. Fix exception handling during reduce so that root
+ exceptions are not masked by exceptions in cleanups.
+ (Raghu Angadi via cutting)
+
+
+Release 0.10.0 - 2007-01-05
+
+ 1. HADOOP-763. Change DFS namenode benchmark to not use MapReduce.
+ (Nigel Daley via cutting)
+
+ 2. HADOOP-777. Use fully-qualified hostnames for tasktrackers and
+ datanodes. (Mahadev Konar via cutting)
+
+ 3. HADOOP-621. Change 'dfs -cat' to exit sooner when output has been
+ closed. (Dhruba Borthakur via cutting)
+
+ 4. HADOOP-752. Rationalize some synchronization in DFS namenode.
+ (Dhruba Borthakur via cutting)
+
+ 5. HADOOP-629. Fix RPC services to better check the protocol name and
+ version. (omalley via cutting)
+
+ 6. HADOOP-774. Limit the number of invalid blocks returned with
+ heartbeats by the namenode to datanodes. Transmitting and
+ processing very large invalid block lists can tie up both the
+ namenode and datanode for too long. (Dhruba Borthakur via cutting)
+
+ 7. HADOOP-738. Change 'dfs -get' command to not create CRC files by
+ default, adding a -crc option to force their creation.
+ (Milind Bhandarkar via cutting)
+
+ 8. HADOOP-676. Improved exceptions and error messages for common job
+ input specification errors. (Sanjay Dahiya via cutting)
+
+ 9. [Included in 0.9.2 release]
+
+10. HADOOP-756. Add new dfsadmin option to wait for filesystem to be
+ operational. (Dhruba Borthakur via cutting)
+
+11. HADOOP-770. Fix jobtracker web interface to display, on restart,
+ jobs that were running when it was last stopped.
+ (Sanjay Dahiya via cutting)
+
+12. HADOOP-331. Write all map outputs to a single file with an index,
+ rather than to a separate file per reduce task. This should both
+ speed the shuffle and make things more scalable.
+ (Devaraj Das via cutting)
+
+13. HADOOP-818. Fix contrib unit tests to not depend on core unit
+ tests. (omalley via cutting)
+
+14. HADOOP-786. Log common exceptions at debug level.
+ (Sanjay Dahiya via cutting)
+
+15. HADOOP-796. Provide more convenient access to failed task
+ information in the web interface. (Sanjay Dahiya via cutting)
+
+16. HADOOP-764. Reduce memory allocations in the namenode somewhat.
+ (Dhruba Borthakur via cutting)
+
+17. HADOOP-802. Update description of mapred.speculative.execution to
+ mention reduces. (Nigel Daley via cutting)
+
+18. HADOOP-806. Include link to datanodes on front page of namenode
+ web interface. (Raghu Angadi via cutting)
+
+19. HADOOP-618. Make JobSubmissionProtocol public.
+ (Arun C Murthy via cutting)
+
+20. HADOOP-782. Fully remove killed tasks. (Arun C Murthy via cutting)
+
+21. HADOOP-792. Fix 'dfs -mv' to return correct status.
+ (Dhruba Borthakur via cutting)
+
+22. HADOOP-673. Give each task its own working directory again.
+ (Mahadev Konar via cutting)
+
+23. HADOOP-571. Extend the syntax of Path to be a URI; to be
+ optionally qualified with a scheme and authority. The scheme
+ determines the FileSystem implementation, while the authority
+ determines the FileSystem instance. New FileSystem
+ implementations may be provided by defining an fs.<scheme>.impl
+ property, naming the FileSystem implementation class. This
+ permits easy integration of new FileSystem implementations.
+ (cutting)
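+
+ For instance, a hypothetical "myfs" scheme could be registered
+ with a single property (the class name is invented for
+ illustration):
+
+    <property>
+      <name>fs.myfs.impl</name>
+      <value>org.example.MyFileSystem</value>
+    </property>
+
+ after which a path such as myfs://server/data selects that
+ implementation.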
+
+24. HADOOP-720. Add an HDFS white paper to website.
+ (Dhruba Borthakur via cutting)
+
+25. HADOOP-794. Fix a divide-by-zero exception when a job specifies
+ zero map tasks. (omalley via cutting)
+
+26. HADOOP-454. Add a 'dfs -dus' command that provides summary disk
+ usage. (Hairong Kuang via cutting)
+
+27. HADOOP-574. Add an Amazon S3 implementation of FileSystem. To
+ use this, one need only specify paths of the form
+ s3://id:secret@bucket/. Alternately, the AWS access key id and
+ secret can be specified in your config, with the properties
+ fs.s3.awsAccessKeyId and fs.s3.awsSecretAccessKey.
+ (Tom White via cutting)
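+
+ A minimal configuration sketch using the properties named above
+ (the credential values are placeholders):
+
+    <property>
+      <name>fs.s3.awsAccessKeyId</name>
+      <value>YOUR_ACCESS_KEY_ID</value>
+    </property>
+    <property>
+      <name>fs.s3.awsSecretAccessKey</name>
+      <value>YOUR_SECRET_KEY</value>
+    </property>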
+
+28. HADOOP-824. Rename DFSShell to be FsShell, since it applies
+ generically to all FileSystem implementations. (cutting)
+
+29. HADOOP-813. Fix map output sorting to report progress, so that
+ sorts which take longer than the task timeout do not fail.
+ (Devaraj Das via cutting)
+
+30. HADOOP-825. Fix HDFS daemons when configured with new URI syntax.
+ (omalley via cutting)
+
+31. HADOOP-596. Fix a bug in phase reporting during reduce.
+ (Sanjay Dahiya via cutting)
+
+32. HADOOP-811. Add a utility, MultithreadedMapRunner.
+ (Alejandro Abdelnur via cutting)
+
+33. HADOOP-829. Within HDFS, clearly separate three different
+ representations for datanodes: one for RPCs, one for
+ namenode-internal use, and one for namespace persistence.
+ (Dhruba Borthakur via cutting)
+
+34. HADOOP-823. Fix problem starting datanode when not all configured
+ data directories exist. (Bryan Pendleton via cutting)
+
+35. HADOOP-451. Add a Split interface. CAUTION: This incompatibly
+ changes the InputFormat and RecordReader interfaces. Not only is
+ FileSplit replaced with Split, but a FileSystem parameter is no
+ longer passed in several methods, input validation has changed,
+ etc. (omalley via cutting)
+
+36. HADOOP-814. Optimize locking in namenode. (Dhruba Borthakur via cutting)
+
+37. HADOOP-738. Change 'fs -put' and 'fs -get' commands to accept
+ standard input and output, respectively. Standard i/o is
+ specified by a file named '-'. (Wendy Chien via cutting)
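+
+ For example (paths are illustrative):
+
+    cat access.log | bin/hadoop fs -put - /logs/access.log
+    bin/hadoop fs -get /logs/access.log -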
+
+38. HADOOP-835. Fix a NullPointerException reading record-compressed
+ SequenceFiles. (Hairong Kuang via cutting)
+
+39. HADOOP-836. Fix a MapReduce bug on Windows, where the wrong
+ FileSystem was used. Also add a static FileSystem.getLocal()
+ method and better Path checking in HDFS, to help avoid such issues
+ in the future. (omalley via cutting)
+
+40. HADOOP-837. Improve RunJar utility to unpack the jar file in
+ hadoop.tmp.dir, rather than the system temporary directory.
+ (Hairong Kuang via cutting)
+
+41. HADOOP-841. Fix native library to build 32-bit version even when
+ on a 64-bit host, if a 32-bit JVM is used. (Arun C Murthy via cutting)
+
+42. HADOOP-838. Fix tasktracker to pass java.library.path to
+ sub-processes, so that libhadoop.a is found.
+ (Arun C Murthy via cutting)
+
+43. HADOOP-844. Send metrics messages on a fixed-delay schedule
+ instead of a fixed-rate schedule. (David Bowen via cutting)
+
+44. HADOOP-849. Fix OutOfMemory exceptions in TaskTracker due to a
+ file handle leak in SequenceFile. (Devaraj Das via cutting)
+
+45. HADOOP-745. Fix a synchronization bug in the HDFS namenode.
+ (Dhruba Borthakur via cutting)
+
+46. HADOOP-850. Add Writable implementations for variable-length
+ integers. (ab via cutting)
+
+47. HADOOP-525. Add raw comparators to record types. This greatly
+ improves record sort performance. (Milind Bhandarkar via cutting)
+
+48. HADOOP-628. Fix a problem with 'fs -cat' command, where some
+ characters were replaced with question marks. (Wendy Chien via cutting)
+
+49. HADOOP-804. Reduce verbosity of MapReduce logging.
+ (Sanjay Dahiya via cutting)
+
+50. HADOOP-853. Rename 'site' to 'docs', in preparation for inclusion
+ in releases. (cutting)
+
+51. HADOOP-371. Include contrib jars and site documentation in
+ distributions. Also add contrib and example documentation to
+ distributed javadoc, in separate sections. (Nigel Daley via cutting)
+
+52. HADOOP-846. Report progress during entire map, as sorting of
+ intermediate outputs may happen at any time, potentially causing
+ task timeouts. (Devaraj Das via cutting)
+
+53. HADOOP-840. In task tracker, queue task cleanups and perform them
+ in a separate thread. (omalley & Mahadev Konar via cutting)
+
+54. HADOOP-681. Add to HDFS the ability to decommission nodes. This
+ causes their blocks to be re-replicated on other nodes, so that
+ they may be removed from a cluster. (Dhruba Borthakur via cutting)
+
+55. HADOOP-470. In HDFS web ui, list the datanodes containing each
+ copy of a block. (Hairong Kuang via cutting)
+
+56. HADOOP-700. Change bin/hadoop to only include core jar file on
+ classpath, not example, test, etc. Also rename core jar to
+ hadoop-${version}-core.jar so that it can be more easily
+ identified. (Nigel Daley via cutting)
+
+57. HADOOP-619. Extend InputFormatBase to accept individual files and
+ glob patterns as MapReduce inputs, not just directories. Also
+ change contrib/streaming to use this. (Sanjay Dahiya via cutting)
+
+
+Release 0.9.2 - 2006-12-15
+
+ 1. HADOOP-639. Restructure InterTrackerProtocol to make task
+ accounting more reliable. (Arun C Murthy via cutting)
+
+ 2. HADOOP-827. Turn off speculative execution by default, since it's
+ currently broken. (omalley via cutting)
+
+ 3. HADOOP-791. Fix a deadlock in the task tracker.
+ (Mahadev Konar via cutting)
+
+
+Release 0.9.1 - 2006-12-06
+
+ 1. HADOOP-780. Use ReflectionUtils to instantiate key and value
+ objects. (ab)
+
+ 2. HADOOP-779. Fix contrib/streaming to work correctly with gzipped
+ input files. (Hairong Kuang via cutting)
+
+
+Release 0.9.0 - 2006-12-01
+
+ 1. HADOOP-655. Remove most deprecated code. A few deprecated things
+ remain, notably UTF8 and some methods that are still required.
+ Also cleaned up constructors for SequenceFile, MapFile, SetFile,
+ and ArrayFile a bit. (cutting)
+
+ 2. HADOOP-565. Upgrade to Jetty version 6. (Sanjay Dahiya via cutting)
+
+ 3. HADOOP-682. Fix DFS format command to work correctly when
+ configured with a non-existent directory. (Sanjay Dahiya via cutting)
+
+ 4. HADOOP-645. Fix a bug in contrib/streaming when -reducer is NONE.
+ (Dhruba Borthakur via cutting)
+
+ 5. HADOOP-687. Fix a classpath bug in bin/hadoop that blocked the
+ servers from starting. (Sameer Paranjpye via omalley)
+
+ 6. HADOOP-683. Remove a script dependency on bash, so it works with
+ dash, the new default for /bin/sh on Ubuntu. (James Todd via cutting)
+
+ 7. HADOOP-382. Extend unit tests to run multiple datanodes.
+ (Milind Bhandarkar via cutting)
+
+ 8. HADOOP-604. Fix some synchronization issues and a
+ NullPointerException in DFS datanode. (Raghu Angadi via cutting)
+
+ 9. HADOOP-459. Fix memory leaks and a host of other issues with
+ libhdfs. (Sameer Paranjpye via cutting)
+
+10. HADOOP-694. Fix a NullPointerException in jobtracker.
+ (Mahadev Konar via cutting)
+
+11. HADOOP-637. Fix a memory leak in the IPC server. Direct buffers
+ are not collected like normal buffers, and provided little
+ advantage. (Raghu Angadi via cutting)
+
+12. HADOOP-696. Fix TestTextInputFormat unit test to not rely on the
+ order of directory listings. (Sameer Paranjpye via cutting)
+
+13. HADOOP-611. Add support for iterator-based merging to
+ SequenceFile. (Devaraj Das via cutting)
+
+14. HADOOP-688. Move DFS administrative commands to a separate
+ command named 'dfsadmin'. (Dhruba Borthakur via cutting)
+
+15. HADOOP-708. Fix test-libhdfs to return the correct status, so
+ that failures will break the build. (Nigel Daley via cutting)
+
+16. HADOOP-646. Fix namenode to handle edits files larger than 2GB.
+ (Milind Bhandarkar via cutting)
+
+17. HADOOP-705. Fix a bug in the JobTracker when failed jobs were
+ not completely cleaned up. (Mahadev Konar via cutting)
+
+18. HADOOP-613. Perform final merge while reducing. This removes one
+ sort pass over the data and should consequently significantly
+ decrease overall processing time. (Devaraj Das via cutting)
+
+19. HADOOP-661. Make each job's configuration visible through the web
+ ui. (Arun C Murthy via cutting)
+
+20. HADOOP-489. In MapReduce, separate user logs from system logs.
+ Each task's log output is now available through the web ui. (Arun
+ C Murthy via cutting)
+
+21. HADOOP-712. Fix record io's xml serialization to correctly handle
+ control-characters. (Milind Bhandarkar via cutting)
+
+22. HADOOP-668. Improvements to the web-based DFS browser.
+ (Hairong Kuang via cutting)
+
+23. HADOOP-715. Fix build.xml so that test logs are written in build
+ directory, rather than in CWD. (Arun C Murthy via cutting)
+
+24. HADOOP-538. Add support for building an optional native library,
+ libhadoop.so, that improves the performance of zlib-based
+ compression. To build this, specify -Dcompile.native to Ant.
+ (Arun C Murthy via cutting)
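+
+ For example (the value passed is an assumption):
+
+    ant -Dcompile.native=true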
+
+25. HADOOP-610. Fix a problem when the DFS block size is configured
+ to be smaller than the buffer size, typically only when debugging.
+ (Milind Bhandarkar via cutting)
+
+26. HADOOP-695. Fix a NullPointerException in contrib/streaming.
+ (Hairong Kuang via cutting)
+
+27. HADOOP-652. In DFS, when a file is deleted, the block count is
+ now decremented. (Vladimir Krokhmalyov via cutting)
+
+28. HADOOP-725. In DFS, optimize block placement algorithm,
+ previously a performance bottleneck. (Milind Bhandarkar via cutting)
+
+29. HADOOP-723. In MapReduce, fix a race condition during the
+ shuffle, which resulted in FileNotFoundExceptions. (omalley via cutting)
+
+30. HADOOP-447. In DFS, fix getBlockSize(Path) to work with relative
+ paths. (Raghu Angadi via cutting)
+
+31. HADOOP-733. Make exit codes in DFShell consistent and add a unit
+ test. (Dhruba Borthakur via cutting)
+
+32. HADOOP-709. Fix contrib/streaming to work with commands that
+ contain control characters. (Dhruba Borthakur via cutting)
+
+33. HADOOP-677. In IPC, permit a version header to be transmitted
+ when connections are established. This will permit us to change
+ the format of IPC requests back-compatibly in subsequent releases.
+ (omalley via cutting)
+
+34. HADOOP-699. Fix DFS web interface so that filesystem browsing
+ works correctly, using the right port number. Also add support
+ for sorting datanode list by various columns.
+ (Raghu Angadi via cutting)
+
+35. HADOOP-76. Implement speculative reduce. Now when a job is
+ configured for speculative execution, both maps and reduces will
+ execute speculatively. Reduce outputs are written to a temporary
+ location and moved to the final location when reduce is complete.
+ (Sanjay Dahiya via cutting)
+
+36. HADOOP-736. Roll back to Jetty 5.1.4, due to performance problems
+ with Jetty 6.0.1.
+
+37. HADOOP-739. Fix TestIPC to use different port number, making it
+ more reliable. (Nigel Daley via cutting)
+
+38. HADOOP-749. Fix a NullPointerException in jobfailures.jsp.
+ (omalley via cutting)
+
+39. HADOOP-747. Fix record serialization to work correctly when
+ records are embedded in Maps. (Milind Bhandarkar via cutting)
+
+40. HADOOP-698. Fix HDFS client not to retry the same datanode on
+ read failures. (Milind Bhandarkar via cutting)
+
+41. HADOOP-689. Add GenericWritable, to facilitate polymorphism in
+ MapReduce, SequenceFile, etc. (Feng Jiang via cutting)
+
+42. HADOOP-430. Stop datanode's HTTP server when registration with
+ namenode fails. (Wendy Chien via cutting)
+
+43. HADOOP-750. Fix a potential race condition during mapreduce
+ shuffle. (omalley via cutting)
+
+44. HADOOP-728. Fix contrib/streaming-related issues, including
+ '-reducer NONE'. (Sanjay Dahiya via cutting)
+
+
+Release 0.8.0 - 2006-11-03
+
+ 1. HADOOP-477. Extend contrib/streaming to scan the PATH environment
+ variables when resolving executable program names.
+ (Dhruba Borthakur via cutting)
+
+ 2. HADOOP-583. In DFSClient, reduce the log level of re-connect
+ attempts from 'info' to 'debug', so they are not normally shown.
+ (Konstantin Shvachko via cutting)
+
+ 3. HADOOP-498. Re-implement DFS integrity checker to run server-side,
+ for much improved performance. (Milind Bhandarkar via cutting)
+
+ 4. HADOOP-586. Use the jar name for otherwise un-named jobs.
+ (Sanjay Dahiya via cutting)
+
+ 5. HADOOP-514. Make DFS heartbeat interval configurable.
+ (Milind Bhandarkar via cutting)
+
+ 6. HADOOP-588. Fix logging and accounting of failed tasks.
+ (Sanjay Dahiya via cutting)
+
+ 7. HADOOP-462. Improve command line parsing in DFSShell, so that
+ incorrect numbers of arguments result in informative errors rather
+ than ArrayOutOfBoundsException. (Dhruba Borthakur via cutting)
+
+ 8. HADOOP-561. Fix DFS so that one replica of each block is written
+ locally, if possible. This was the intent, but there was a bug.
+ (Dhruba Borthakur via cutting)
+
+ 9. HADOOP-610. Fix TaskTracker to survive more exceptions, keeping
+ tasks from becoming lost. (omalley via cutting)
+
+10. HADOOP-625. Add a servlet to all http daemons that displays a
+ stack dump, useful for debugging. (omalley via cutting)
+
+11. HADOOP-554. Fix DFSShell to return -1 for errors.
+ (Dhruba Borthakur via cutting)
+
+12. HADOOP-626. Correct the documentation in the NNBench example
+ code, and also remove a mistaken call there.
+ (Nigel Daley via cutting)
+
+13. HADOOP-634. Add missing license to many files.
+ (Nigel Daley via cutting)
+
+14. HADOOP-627. Fix some synchronization problems in MiniMRCluster
+ that sometimes caused unit tests to fail. (Nigel Daley via cutting)
+
+15. HADOOP-563. Improve the NameNode's lease policy so that leases
+ are held for one hour without renewal (instead of one minute).
+ However, another attempt to create the same file will still succeed
+ if the lease has not been renewed within a minute. This prevents
+ communication or scheduling problems from causing a write to fail
+ for up to an hour, barring some other process trying to create the
+ same file. (Dhruba Borthakur via cutting)
+
+16. HADOOP-635. In DFSShell, permit specification of multiple files
+ as the source for file copy and move commands.
+ (Dhruba Borthakur via cutting)
+
+17. HADOOP-641. Change NameNode to request a fresh block report from
+ a re-discovered DataNode, so that no-longer-needed replications
+ are stopped promptly. (Konstantin Shvachko via cutting)
+
+18. HADOOP-642. Change IPC client to specify an explicit connect
+ timeout. (Konstantin Shvachko via cutting)
+
+19. HADOOP-638. Fix an unsynchronized access to TaskTracker's
+ internal state. (Nigel Daley via cutting)
+
+20. HADOOP-624. Fix servlet path to stop a Jetty warning on startup.
+ (omalley via cutting)
+
+21. HADOOP-578. Failed tasks are no longer placed at the end of the
+ task queue. This was originally done to work around other
+ problems that have now been fixed. Re-executing failed tasks
+ sooner causes buggy jobs to fail faster. (Sanjay Dahiya via cutting)
+
+22. HADOOP-658. Update source file headers per Apache policy. (cutting)
+
+23. HADOOP-636. Add MapFile & ArrayFile constructors which accept a
+ Progressable, and pass it down to SequenceFile. This permits
+ reduce tasks which use MapFile to still report progress while
+ writing blocks to the filesystem. (cutting)
+
+24. HADOOP-576. Enable contrib/streaming to use the file cache. Also
+ extend the cache to permit symbolic links to cached items, rather
+ than local file copies. (Mahadev Konar via cutting)
+
+25. HADOOP-482. Fix unit tests to work when a cluster is running on
+ the same machine, removing port conflicts. (Wendy Chien via cutting)
+
+26. HADOOP-90. Permit dfs.name.dir to list multiple directories,
+ where namenode data is to be replicated. (Milind Bhandarkar via cutting)
+
+27. HADOOP-651. Fix DFSCk to correctly pass parameters to the servlet
+ on the namenode. (Milind Bhandarkar via cutting)
+
+28. HADOOP-553. Change main() routines of DataNode and NameNode to
+ log exceptions rather than letting the JVM print them to standard
+ error. Also, change the hadoop-daemon.sh script to rotate
+ standard i/o log files. (Raghu Angadi via cutting)
+
+29. HADOOP-399. Fix javadoc warnings. (Nigel Daley via cutting)
+
+30. HADOOP-599. Fix web ui and command line to correctly report DFS
+ filesystem size statistics. Also improve web layout.
+ (Raghu Angadi via cutting)
+
+31. HADOOP-660. Permit specification of junit test output format.
+ (Nigel Daley via cutting)
+
+32. HADOOP-663. Fix a few unit test issues. (Mahadev Konar via cutting)
+
+33. HADOOP-664. Cause entire build to fail if libhdfs tests fail.
+ (Nigel Daley via cutting)
+
+34. HADOOP-633. Keep jobtracker from dying when job initialization
+ throws exceptions. Also improve exception handling in a few other
+ places and add more informative thread names.
+ (omalley via cutting)
+
+35. HADOOP-669. Fix a problem introduced by HADOOP-90 that can cause
+ DFS to lose files. (Milind Bhandarkar via cutting)
+
+36. HADOOP-373. Consistently check the value returned by
+ FileSystem.mkdirs(). (Wendy Chien via cutting)
+
+37. HADOOP-670. Code cleanups in some DFS internals: use generic
+ types, replace Vector with ArrayList, etc.
+ (Konstantin Shvachko via cutting)
+
+38. HADOOP-647. Permit map outputs to use a different compression
+ type than the job output. (omalley via cutting)
+
+39. HADOOP-671. Fix file cache to check for pre-existence before
+ creating. (Mahadev Konar via cutting)
+
+40. HADOOP-665. Extend many DFSShell commands to accept multiple
+ arguments. Now commands like "ls", "rm", etc. will operate on
+ multiple files. (Dhruba Borthakur via cutting)
+
+
+Release 0.7.2 - 2006-10-18
+
+ 1. HADOOP-607. Fix a bug where classes included in job jars were not
+ found by tasks. (Mahadev Konar via cutting)
+
+ 2. HADOOP-609. Add a unit test that checks that classes in job jars
+ can be found by tasks. Also modify unit tests to specify multiple
+ local directories. (Mahadev Konar via cutting)
+
+
+Release 0.7.1 - 2006-10-11
+
+ 1. HADOOP-593. Fix a NullPointerException in the JobTracker.
+ (omalley via cutting)
+
+ 2. HADOOP-592. Fix a NullPointerException in the IPC Server. Also
+ consistently log when stale calls are discarded. (omalley via cutting)
+
+ 3. HADOOP-594. Increase the DFS safe-mode threshold from .95 to
+ .999, so that nearly all blocks must be reported before filesystem
+ modifications are permitted. (Konstantin Shvachko via cutting)
+
+ 4. HADOOP-598. Fix tasks to retry when reporting completion, so that
+ a single RPC timeout won't fail a task. (omalley via cutting)
+
+ 5. HADOOP-597. Fix TaskTracker to not discard map outputs for errors
+ in transmitting them to reduce nodes. (omalley via cutting)
+
+
+Release 0.7.0 - 2006-10-06
+
+ 1. HADOOP-243. Fix rounding in the display of task and job progress
+ so that things are not shown to be 100% complete until they are in
+ fact finished. (omalley via cutting)
+
+ 2. HADOOP-438. Limit the length of absolute paths in DFS, since the
+ file format used to store pathnames has some limitations.
+ (Wendy Chien via cutting)
+
+ 3. HADOOP-530. Improve error messages in SequenceFile when keys or
+ values are of the wrong type. (Hairong Kuang via cutting)
+
+ 4. HADOOP-288. Add a file caching system and use it in MapReduce to
+ cache job jar files on slave nodes. (Mahadev Konar via cutting)
+
+ 5. HADOOP-533. Fix unit test to not modify conf directory.
+ (Hairong Kuang via cutting)
+
+ 6. HADOOP-527. Permit specification of the local address that various
+ Hadoop daemons should bind to. (Philippe Gassmann via cutting)
+
+ 7. HADOOP-542. Updates to contrib/streaming: reformatted source code,
+ on-the-fly merge sort, a fix for HADOOP-540, etc.
+ (Michel Tourn via cutting)
+
+ 8. HADOOP-545. Remove an unused config file parameter.
+ (Philippe Gassmann via cutting)
+
+ 9. HADOOP-548. Add an Ant property "test.output" to build.xml that
+ causes test output to be logged to the console. (omalley via cutting)
+
+10. HADOOP-261. Record an error message when map output is lost.
+ (omalley via cutting)
+
+11. HADOOP-293. Report the full list of task error messages in the
+ web ui, not just the most recent. (omalley via cutting)
+
+12. HADOOP-551. Restore JobClient's console printouts to only include
+ a maximum of one update per one percent of progress.
+ (omalley via cutting)
+
+13. HADOOP-306. Add a "safe" mode to DFS. The name node enters this
+ when less than a specified percentage of file data is complete.
+ Currently safe mode is only used on startup, but eventually it
+ will also be entered when datanodes disconnect and file data
+ becomes incomplete. While in safe mode no filesystem
+ modifications are permitted and block replication is inhibited.
+ (Konstantin Shvachko via cutting)
+
+14. HADOOP-431. Change 'dfs -rm' to not operate recursively and add a
+ new command, 'dfs -rmr' which operates recursively.
+ (Sameer Paranjpye via cutting)
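+
+ For example (path illustrative):
+
+    bin/hadoop dfs -rmr /tmp/old-job-output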
+
+15. HADOOP-263. Include timestamps for job transitions. The web
+ interface now displays the start and end times of tasks and the
+ start times of sorting and reducing for reduce tasks. Also,
+ extend ObjectWritable to handle enums, so that they can be passed
+ as RPC parameters. (Sanjay Dahiya via cutting)
+
+16. HADOOP-556. Contrib/streaming: send keep-alive reports to task
+ tracker every 10 seconds rather than every 100 records, to avoid
+ task timeouts. (Michel Tourn via cutting)
+
+17. HADOOP-547. Fix reduce tasks to ping tasktracker while copying
+ data, rather than only between copies, avoiding task timeouts.
+ (Sanjay Dahiya via cutting)
+
+18. HADOOP-537. Fix src/c++/libhdfs build process to create files in
+ build/, no longer modifying the source tree.
+ (Arun C Murthy via cutting)
+
+19. HADOOP-487. Throw a more informative exception for unknown RPC
+ hosts. (Sameer Paranjpye via cutting)
+
+20. HADOOP-559. Add file name globbing (pattern matching) support to
+ the FileSystem API, and use it in DFSShell ('bin/hadoop dfs')
+ commands. (Hairong Kuang via cutting)
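+
+ For example (paths invented for illustration; quoting keeps the
+ local shell from expanding the pattern):
+
+    bin/hadoop dfs -ls '/logs/2006-*'
+    bin/hadoop dfs -rm '/tmp/scratch-??'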
+
+21. HADOOP-508. Fix a bug in FSDataInputStream. Incorrect data was
+ returned after seeking to a random location.
+ (Milind Bhandarkar via cutting)
+
+22. HADOOP-560. Add a "killed" task state. This can be used to
+ distinguish kills from other failures. Task state has also been
+ converted to use an enum type instead of an int, uncovering a bug
+ elsewhere. The web interface is also updated to display killed
+ tasks. (omalley via cutting)
+
+23. HADOOP-423. Normalize Paths containing directories named "." and
+ "..", using the standard, unix interpretation. Also add checks in
+ DFS, prohibiting the use of "." or ".." as directory or file
+ names. (Wendy Chien via cutting)
+
+24. HADOOP-513. Replace map output handling with a servlet, rather
+ than a JSP page. This fixes an issue where
+ IllegalStateException's were logged, sets content-length
+ correctly, and better handles some errors. (omalley via cutting)
+
+25. HADOOP-552. Improved error checking when copying map output files
+ to reduce nodes. (omalley via cutting)
+
+26. HADOOP-566. Fix scripts to work correctly when accessed through
+ relative symbolic links. (Lee Faris via cutting)
+
+27. HADOOP-519. Add positioned read methods to FSInputStream. These
+ permit one to read from a stream without moving its position, and
+ can hence be performed by multiple threads at once on a single
+ stream. Implement an optimized version for DFS and local FS.
+ (Milind Bhandarkar via cutting)
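+
+ A minimal sketch (fs and path are presumed already set up, and the
+ data stream returned by open() is assumed to expose the same
+ positioned read):
+
+    FSDataInputStream in = fs.open(path);
+    byte[] buf = new byte[4096];
+    // Read at an absolute offset without moving the stream's
+    // position, so several threads may share one open stream.
+    int n = in.read(1024L * 1024L, buf, 0, buf.length);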
+
+28. HADOOP-522. Permit block compression with MapFile and SetFile.
+ Since these formats are always sorted, block compression can
+ provide a big advantage. (cutting)
+
+29. HADOOP-567. Record version and revision information in builds. A
+ package manifest is added to the generated jar file containing
+ version information, and a VersionInfo utility is added that
+ includes further information, including the build date and user,
+ and the subversion revision and repository. A 'bin/hadoop
+ version' command is added to show this information, and it is also
+ added to various web interfaces. (omalley via cutting)
+
+30. HADOOP-568. Fix so that errors while initializing tasks on a
+ tasktracker correctly report the task as failed to the jobtracker,
+ so that it will be rescheduled. (omalley via cutting)
+
+31. HADOOP-550. Disable automatic UTF-8 validation in Text. This
+ permits, e.g., TextInputFormat to again operate on non-UTF-8 data.
+ (Hairong and Mahadev via cutting)
+
+32. HADOOP-343. Fix mapred copying so that a failed tasktracker
+ doesn't cause other copies to slow. (Sameer Paranjpye via cutting)
+
+33. HADOOP-239. Add a persistent job history mechanism, so that basic
+ job statistics are not lost after 24 hours and/or when the
+ jobtracker is restarted. (Sanjay Dahiya via cutting)
+
+34. HADOOP-506. Ignore heartbeats from stale task trackers.
+ (Sanjay Dahiya via cutting)
+
+35. HADOOP-255. Discard stale, queued IPC calls. Do not process
+ calls whose clients will likely time out before they receive a
+ response. When the queue is full, new calls are now received and
+ queued, and the oldest calls are discarded, so that, when servers
+ get bogged down, they no longer develop a backlog on the socket.
+ This should improve some DFS namenode failure modes.
+ (omalley via cutting)
+
+36. HADOOP-581. Fix datanode to not reset itself on communications
+ errors with the namenode. If a request to the namenode fails, the
+ datanode should retry, not restart. This reduces the load on the
+ namenode, since restarts cause a resend of the block report.
+ (omalley via cutting)
+
+
+Release 0.6.2 - 2006-09-18
+
+1. HADOOP-532. Fix a bug reading value-compressed sequence files,
+ where an exception was thrown reporting that the full value had not
+ been read. (omalley via cutting)
+
+2. HADOOP-534. Change the default value class in JobConf to be Text
+ instead of the now-deprecated UTF8. This fixes the Grep example
+ program, which was updated to use Text, but relies on this
+ default. (Hairong Kuang via cutting)
+
+
+Release 0.6.1 - 2006-09-13
+
+ 1. HADOOP-520. Fix a bug in libhdfs, where write failures were not
+ correctly returning error codes. (Arun C Murthy via cutting)
+
+ 2. HADOOP-523. Fix a NullPointerException when TextInputFormat is
+ explicitly specified. Also add a test case for this.
+ (omalley via cutting)
+
+ 3. HADOOP-521. Fix another NullPointerException finding the
+ ClassLoader when using libhdfs. (omalley via cutting)
+
+ 4. HADOOP-526. Fix a NullPointerException when attempting to start
+ two datanodes in the same directory. (Milind Bhandarkar via cutting)
+
+ 5. HADOOP-529. Fix a NullPointerException when opening
+ value-compressed sequence files generated by pre-0.6.0 Hadoop.
+ (omalley via cutting)
+
+
+Release 0.6.0 - 2006-09-08
+
+ 1. HADOOP-427. Replace some uses of DatanodeDescriptor in the DFS
+ web UI code with DatanodeInfo, the preferred public class.
+ (Devaraj Das via cutting)
+
+ 2. HADOOP-426. Fix streaming contrib module to work correctly on
+ Solaris. This was causing nightly builds to fail.
+ (Michel Tourn via cutting)
+
+ 3. HADOOP-400. Improvements to task assignment. Tasks are no longer
+ re-run on nodes where they have failed (unless no other node is
+ available). Also, tasks are better load-balanced among nodes.
+ (omalley via cutting)
+
+ 4. HADOOP-324. Fix datanode to not exit when a disk is full, but
+ rather simply to fail writes. (Wendy Chien via cutting)
+
+ 5. HADOOP-434. Change smallJobsBenchmark to use standard Hadoop
+ scripts. (Sanjay Dahiya via cutting)
+
+ 6. HADOOP-453. Fix a bug in Text.setCapacity(). (siren via cutting)
+
+ 7. HADOOP-450. Change so that input types are determined by the
+ RecordReader rather than specified directly in the JobConf. This
+ facilitates jobs with a variety of input types.
+
+ WARNING: This contains incompatible API changes! The RecordReader
+ interface has two new methods that all user-defined InputFormats
+ must now define. Also, the values returned by TextInputFormat are
+ no longer of class UTF8, but now of class Text.
+
+ 8. HADOOP-436. Fix an error-handling bug in the web ui.
+ (Devaraj Das via cutting)
+
+ 9. HADOOP-455. Fix a bug in Text, where DEL was not permitted.
+ (Hairong Kuang via cutting)
+
+10. HADOOP-456. Change the DFS namenode to keep a persistent record
+ of the set of known datanodes. This will be used to implement a
+ "safe mode" where filesystem changes are prohibited when a
+ critical percentage of the datanodes are unavailable.
+ (Konstantin Shvachko via cutting)
+
+11. HADOOP-322. Add a job control utility. This permits one to
+ specify job interdependencies. Each job is submitted only after
+ the jobs it depends on have successfully completed.
+ (Runping Qi via cutting)
+
+12. HADOOP-176. Fix a bug in IntWritable.Comparator.
+ (Dick King via cutting)
+
+13. HADOOP-421. Replace uses of String in recordio package with Text
+ class, for improved handling of UTF-8 data.
+ (Milind Bhandarkar via cutting)
+
+14. HADOOP-464. Improved error message when job jar not found.
+ (Michel Tourn via cutting)
+
+15. HADOOP-469. Fix /bin/bash specifics that have crept into our
+ /bin/sh scripts since HADOOP-352.
+ (Jean-Baptiste Quenot via cutting)
+
+16. HADOOP-468. Add HADOOP_NICENESS environment variable to set
+ scheduling priority for daemons. (Vetle Roeim via cutting)
+
+17. HADOOP-473. Fix TextInputFormat to correctly handle more EOL
+ formats. Things now work correctly with CR, LF or CRLF.
+ (Dennis Kubes & James White via cutting)
+
+18. HADOOP-461. Make Java 1.5 an explicit requirement. (cutting)
+
+19. HADOOP-54. Add block compression to SequenceFile. One may now
+ specify that blocks of keys and values are compressed together,
+ improving compression for small keys and values.
+ SequenceFile.Writer's constructor is now deprecated and replaced
+ with a factory method. (Arun C Murthy via cutting)
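+
+ A sketch of the factory style (the method name createWriter and
+ its argument list are assumed from later releases):
+
+    SequenceFile.Writer writer =
+      SequenceFile.createWriter(fs, conf, path,
+                                Text.class, Text.class,
+                                SequenceFile.CompressionType.BLOCK);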
+
+20. HADOOP-281. Prohibit DFS files that are also directories.
+ (Wendy Chien via cutting)
+
+21. HADOOP-486. Add the job username to JobStatus instances returned
+ by JobClient. (Mahadev Konar via cutting)
+
+22. HADOOP-437. contrib/streaming: Add support for gzipped inputs.
+ (Michel Tourn via cutting)
+
+23. HADOOP-463. Add variable expansion to config files.
+ Configuration property values may now contain variable
+ expressions. A variable is referenced with the syntax
+ '${variable}'. Variables values are found first in the
+ configuration, and then in Java system properties. The default
+ configuration is modified so that temporary directories are now
+ under ${hadoop.tmp.dir}, which is, by default,
+ /tmp/hadoop-${user.name}. (Michel Tourn via cutting)
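+
+ For example, the new default can be written directly in a
+ configuration file:
+
+    <property>
+      <name>hadoop.tmp.dir</name>
+      <value>/tmp/hadoop-${user.name}</value>
+    </property>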
+
+24. HADOOP-419. Fix a NullPointerException finding the ClassLoader
+ when using libhdfs. (omalley via cutting)
+
+25. HADOOP-460. Fix contrib/smallJobsBenchmark to use Text instead of
+ UTF8. (Sanjay Dahiya via cutting)
+
+26. HADOOP-196. Fix Configuration(Configuration) constructor to work
+ correctly. (Sami Siren via cutting)
+
+27. HADOOP-501. Fix Configuration.toString() to handle URL resources.
+ (Thomas Friol via cutting)
+
+28. HADOOP-499. Reduce the use of Strings in contrib/streaming,
+ replacing them with Text for better performance.
+ (Hairong Kuang via cutting)
+
+29. HADOOP-64. Manage multiple volumes with a single DataNode.
+ Previously DataNode would create a separate daemon per configured
+ volume, each with its own connection to the NameNode. Now all
+ volumes are handled by a single DataNode daemon, reducing the load
+ on the NameNode. (Milind Bhandarkar via cutting)
+
+30. HADOOP-424. Fix MapReduce so that jobs which generate zero splits
+ do not fail. (Frédéric Bertin via cutting)
+
+31. HADOOP-408. Adjust some timeouts and remove some others so that
+ unit tests run faster. (cutting)
+
+32. HADOOP-507. Fix an IllegalAccessException in DFS.
+ (omalley via cutting)
+
+33. HADOOP-320. Fix so that checksum files are correctly copied when
+ the destination of a file copy is a directory.
+ (Hairong Kuang via cutting)
+
+34. HADOOP-286. In DFSClient, avoid pinging the NameNode with
+ renewLease() calls when no files are being written.
+ (Konstantin Shvachko via cutting)
+
+35. HADOOP-312. Close idle IPC connections. All IPC connections were
+ cached forever. Now, after a connection has been idle for more
+ than a configurable amount of time (one second by default), the
+ connection is closed, conserving resources on both client and
+ server. (Devaraj Das via cutting)
+
+36. HADOOP-497. Permit the specification of the network interface and
+ nameserver to be used when determining the local hostname
+ advertised by datanodes and tasktrackers.
+ (Lorenzo Thione via cutting)
+
+37. HADOOP-441. Add a compression codec API and extend SequenceFile
+ to use it. This will permit the use of alternate compression
+ codecs in SequenceFile. (Arun C Murthy via cutting)
+
+38. HADOOP-483. Improvements to libhdfs build and documentation.
+ (Arun C Murthy via cutting)
+
+39. HADOOP-458. Fix a memory corruption bug in libhdfs.
+ (Arun C Murthy via cutting)
+
+40. HADOOP-517. Fix a contrib/streaming bug in end-of-line detection.
+ (Hairong Kuang via cutting)
+
+41. HADOOP-474. Add CompressionCodecFactory, and use it in
+ TextInputFormat and TextOutputFormat. Compressed input files are
+ automatically decompressed when they have the correct extension.
+ Output files will, when output compression is specified, be
+ generated with an appropriate extension. Also add a gzip codec and
+ fix problems with UTF8 text inputs. (omalley via cutting)
+
+
+Release 0.5.0 - 2006-08-04
+
+ 1. HADOOP-352. Fix shell scripts to use /bin/sh instead of
+ /bin/bash, for better portability.
+ (Jean-Baptiste Quenot via cutting)
+
+ 2. HADOOP-313. Permit task state to be saved so that single tasks
+ may be manually re-executed when debugging. (omalley via cutting)
+
+ 3. HADOOP-339. Add method to JobClient API listing jobs that are
+ not yet complete, i.e., that are queued or running.
+ (Mahadev Konar via cutting)
+
+ 4. HADOOP-355. Updates to the streaming contrib module, including
+ API fixes, making reduce optional, and adding an input type for
+ StreamSequenceRecordReader. (Michel Tourn via cutting)
+
+ 5. HADOOP-358. Fix an NPE bug in Path.equals().
+ (Frédéric Bertin via cutting)
+
+ 6. HADOOP-327. Fix ToolBase to not call System.exit() when
+ exceptions are thrown. (Hairong Kuang via cutting)
+
+ 7. HADOOP-359. Permit map output to be compressed.
+ (omalley via cutting)
+
+ 8. HADOOP-341. Permit input URI to CopyFiles to use the HTTP
+ protocol. This lets one, e.g., more easily copy log files into
+ DFS. (Arun C Murthy via cutting)
+
+ 9. HADOOP-361. Remove unix dependencies from streaming contrib
+ module tests, making them pure java. (Michel Tourn via cutting)
+
+10. HADOOP-354. Make public methods to stop DFS daemons.
+ (Barry Kaplan via cutting)
+
+11. HADOOP-252. Add versioning to RPC protocols.
+ (Milind Bhandarkar via cutting)
+
+12. HADOOP-356. Add contrib to "compile" and "test" build targets, so
+ that this code is better maintained. (Michel Tourn via cutting)
+
+13. HADOOP-307. Add smallJobsBenchmark contrib module. This runs
+ lots of small jobs, in order to determine per-task overheads.
+ (Sanjay Dahiya via cutting)
+
+14. HADOOP-342. Add a tool for log analysis: Logalyzer.
+ (Arun C Murthy via cutting)
+
+15. HADOOP-347. Add web-based browsing of DFS content. The namenode
+ redirects browsing requests to datanodes. Content requests are
+ redirected to datanodes where the data is local when possible.
+ (Devaraj Das via cutting)
+
+16. HADOOP-351. Make Hadoop IPC kernel independent of Jetty.
+ (Devaraj Das via cutting)
+
+17. HADOOP-237. Add metric reporting to DFS and MapReduce. With only
+ minor configuration changes, one can now monitor many Hadoop
+ system statistics using Ganglia or other monitoring systems.
+ (Milind Bhandarkar via cutting)
+
+18. HADOOP-376. Fix datanode's HTTP server to scan for a free port.
+ (omalley via cutting)
+
+19. HADOOP-260. Add --config option to shell scripts, specifying an
+ alternate configuration directory. (Milind Bhandarkar via cutting)
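+
+ For example (the directory path is illustrative):
+
+    bin/start-all.sh --config /etc/hadoop-staging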
+
+20. HADOOP-381. Permit developers to save the temporary files for
+ tasks whose names match a regular expression, to facilitate
+ debugging. (omalley via cutting)
+
+21. HADOOP-344. Fix some Windows-related problems with DF.
+ (Konstantin Shvachko via cutting)
+
+22. HADOOP-380. Fix reduce tasks to poll less frequently for map
+ outputs. (Mahadev Konar via cutting)
+
+23. HADOOP-321. Refactor DatanodeInfo, in preparation for
+ HADOOP-306. (Konstantin Shvachko & omalley via cutting)
+
+24. HADOOP-385. Fix some bugs in record io code generation.
+ (Milind Bhandarkar via cutting)
+
+25. HADOOP-302. Add new Text class to replace UTF8, removing
+ limitations of that class. Also refactor utility methods for
+ writing zero-compressed integers (VInts and VLongs).
+ (Hairong Kuang via cutting)
+
+26. HADOOP-335. Refactor DFS namespace/transaction logging in
+ namenode. (Konstantin Shvachko via cutting)
+
+27. HADOOP-375. Fix handling of the datanode HTTP daemon's port so
+ that multiple datanodes can be run on a single host.
+ (Devaraj Das via cutting)
+
+28. HADOOP-386. When removing excess DFS block replicas, remove those
+ on nodes with the least free space first.
+ (Johan Oskarson via cutting)
+
+29. HADOOP-389. Fix intermittent failures of mapreduce unit tests.
+ Also fix some build dependencies.
+ (Mahadev & Konstantin via cutting)
+
+30. HADOOP-362. Fix a problem where jobs hang when status messages
+ are received out-of-order. (omalley via cutting)
+
+31. HADOOP-394. Change order of DFS shutdown in unit tests to
+ minimize errors logged. (Konstantin Shvachko via cutting)
+
+32. HADOOP-396. Make DatanodeID implement Writable.
+ (Konstantin Shvachko via cutting)
+
+33. HADOOP-377. Permit one to add URL resources to a Configuration.
+ (Jean-Baptiste Quenot via cutting)
+
+34. HADOOP-345. Permit iteration over Configuration key/value pairs.
+ (Michel Tourn via cutting)
+
+35. HADOOP-409. Streaming contrib module: make configuration
+ properties available to commands as environment variables.
+ (Michel Tourn via cutting)
+
+36. HADOOP-369. Add -getmerge option to dfs command that appends all
+ files in a directory into a single local file.
+ (Johan Oskarson via cutting)
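+
+ For example (paths illustrative):
+
+    bin/hadoop dfs -getmerge /user/output merged-output.txt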
+
+37. HADOOP-410. Replace some TreeMaps with HashMaps in DFS, for
+ a 17% performance improvement. (Milind Bhandarkar via cutting)
+
+38. HADOOP-411. Add unit tests for command line parser.
+ (Hairong Kuang via cutting)
+
+39. HADOOP-412. Add MapReduce input formats that support filtering
+ of SequenceFile data, including sampling and regex matching.
+ Also, move JobConf.newInstance() to a new utility class.
+ (Hairong Kuang via cutting)
+
+40. HADOOP-226. Fix fsck command to properly consider replication
+ counts, now that these can vary per file. (Bryan Pendleton via cutting)
+
+41. HADOOP-425. Add a Python MapReduce example, using Jython.
+ (omalley via cutting)
+
+
+Release 0.4.0 - 2006-06-28
+
+ 1. HADOOP-298. Improved progress reports for CopyFiles utility, the
+ distributed file copier. (omalley via cutting)
+
+ 2. HADOOP-299. Fix the task tracker, permitting multiple jobs to
+ more easily execute at the same time. (omalley via cutting)
+
+ 3. HADOOP-250. Add an HTTP user interface to the namenode, running
+ on port 50070. (Devaraj Das via cutting)
+
+ 4. HADOOP-123. Add MapReduce unit tests that run a jobtracker and
+ tasktracker, greatly increasing code coverage.
+ (Milind Bhandarkar via cutting)
+
+ 5. HADOOP-271. Add links from jobtracker's web ui to tasktracker's
+ web ui. Also attempt to log a thread dump of child processes
+ before they're killed. (omalley via cutting)
+
+ 6. HADOOP-210. Change RPC server to use a selector instead of a
+ thread per connection. This should make it easier to scale to
+ larger clusters. Note that this incompatibly changes the RPC
+ protocol: clients and servers must both be upgraded to the new
+ version to ensure correct operation. (Devaraj Das via cutting)
+
+ 7. HADOOP-311. Change DFS client to retry failed reads, so that a
+ single read failure will not alone cause failure of a task.
+ (omalley via cutting)
+
+ 8. HADOOP-314. Remove the "append" phase when reducing. Map output
+ files are now directly passed to the sorter, without first
+ appending them into a single file. Now, the first third of reduce
+ progress is "copy" (transferring map output to reduce nodes), the
+ middle third is "sort" (sorting map output) and the last third is
+ "reduce" (generating output). Long-term, the "sort" phase will
+ also be removed. (omalley via cutting)
+
+ 9. HADOOP-316. Fix a potential deadlock in the jobtracker.
+ (omalley via cutting)
+
+10. HADOOP-319. Fix FileSystem.close() to remove the FileSystem
+ instance from the cache. (Hairong Kuang via cutting)
+
+11. HADOOP-135. Fix potential deadlock in JobTracker by acquiring
+ locks in a consistent order. (omalley via cutting)
+
+12. HADOOP-278. Check for existence of input directories before
+ starting MapReduce jobs, making it easier to debug this common
+ error. (omalley via cutting)
+
+13. HADOOP-304. Improve error message for
+ UnregisterdDatanodeException to include expected node name.
+ (Konstantin Shvachko via cutting)
+
+14. HADOOP-305. Fix TaskTracker to ask for new tasks as soon as a
+ task is finished, rather than waiting for the next heartbeat.
+ This improves performance when tasks are short.
+ (Mahadev Konar via cutting)
+
+15. HADOOP-59. Add support for generic command line options. One may
+ now specify the filesystem (-fs), the MapReduce jobtracker (-jt),
+ a config file (-conf) or any configuration property (-D). The
+ "dfs", "fsck", "job", and "distcp" commands currently support
+ this, with more to be added. (Hairong Kuang via cutting)
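+
+ For example (host name and port hypothetical), a client command
+ can be pointed at another cluster without editing config files:
+
+ bin/hadoop dfs -fs namenode.example.com:8020 -ls /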
+
+16. HADOOP-296. Permit specification of the amount of reserved space
+ on a DFS datanode. One may specify both the percentage free and
+ the number of bytes. (Johan Oskarson via cutting)
+
+17. HADOOP-325. Fix a problem initializing RPC parameter classes, and
+ remove the workaround used to initialize classes.
+ (omalley via cutting)
+
+18. HADOOP-328. Add an option to the "distcp" command to ignore read
+ errors while copying. (omalley via cutting)
+
+19. HADOOP-27. Don't allocate tasks to trackers whose local free
+ space is too low. (Johan Oskarson via cutting)
+
+20. HADOOP-318. Keep slow DFS output from causing task timeouts.
+ This incompatibly changes some public interfaces, adding a
+ parameter to OutputFormat.getRecordWriter() and the new method
+ Reporter.progress(), but it makes lots of tasks succeed that were
+ previously failing. (Milind Bhandarkar via cutting)
+
+
+Release 0.3.2 - 2006-06-09
+
+ 1. HADOOP-275. Update the streaming contrib module to use log4j for
+ its logging. (Michel Tourn via cutting)
+
+ 2. HADOOP-279. Provide defaults for log4j logging parameters, so
+ that things still work reasonably when Hadoop-specific system
+ properties are not provided. (omalley via cutting)
+
+ 3. HADOOP-280. Fix a typo in AllTestDriver which caused the wrong
+ test to be run when "DistributedFSCheck" was specified.
+ (Konstantin Shvachko via cutting)
+
+ 4. HADOOP-240. DFS's mkdirs() implementation no longer logs a warning
+ when the directory already exists. (Hairong Kuang via cutting)
+
+ 5. HADOOP-285. Fix DFS datanodes to be able to re-join the cluster
+ after the connection to the namenode is lost. (omalley via cutting)
+
+ 6. HADOOP-277. Fix a race condition when creating directories.
+ (Sameer Paranjpye via cutting)
+
+ 7. HADOOP-289. Improved exception handling in DFS datanode.
+ (Konstantin Shvachko via cutting)
+
+ 8. HADOOP-292. Fix client-side logging to go to standard error
+ rather than standard output, so that it can be distinguished from
+ application output. (omalley via cutting)
+
+ 9. HADOOP-294. Fixed bug where conditions for retrying after errors
+ in the DFS client were reversed. (omalley via cutting)
+
+
+Release 0.3.1 - 2006-06-05
+
+ 1. HADOOP-272. Fix a bug in bin/hadoop setting log
+ parameters. (omalley & cutting)
+
+ 2. HADOOP-274. Change applications to log to standard output rather
+ than to a rolling log file like daemons. (omalley via cutting)
+
+ 3. HADOOP-262. Fix reduce tasks to report progress while they're
+ waiting for map outputs, so that they do not time out.
+ (Mahadev Konar via cutting)
+
+ 4. HADOOP-245 and HADOOP-246. Improvements to record io package.
+ (Mahadev Konar via cutting)
+
+ 5. HADOOP-276. Add logging config files to jar file so that they're
+ always found. (omalley via cutting)
+
+
+Release 0.3.0 - 2006-06-02
+
+ 1. HADOOP-208. Enhance MapReduce web interface, adding new pages
+ for failed tasks, and tasktrackers. (omalley via cutting)
+
+ 2. HADOOP-204. Tweaks to metrics package. (David Bowen via cutting)
+
+ 3. HADOOP-209. Add a MapReduce-based file copier. This will
+ copy files within or between file systems in parallel.
+ (Milind Bhandarkar via cutting)
+
+ 4. HADOOP-146. Fix DFS to check when randomly generating a new block
+ id that no existing blocks already have that id.
+ (Milind Bhandarkar via cutting)
+
+ 5. HADOOP-180. Make a daemon thread that does the actual task cleanups, so
+ that the main offerService thread in the taskTracker doesn't get stuck
+ and miss its heartbeat window. This was killing many task trackers as
+ big jobs finished (300+ tasks / node). (omalley via cutting)
+
+ 6. HADOOP-200. Avoid transmitting entire list of map task names to
+ reduce tasks. Instead just transmit the number of map tasks and
+ henceforth refer to them by number when collecting map output.
+ (omalley via cutting)
+
+ 7. HADOOP-219. Fix a NullPointerException when handling a checksum
+ exception under SequenceFile.Sorter.sort(). (cutting & stack)
+
+ 8. HADOOP-212. Permit alteration of the file block size in DFS. The
+ default block size for new files may now be specified in the
+ configuration with the dfs.block.size property. The block size
+ may also be specified when files are opened.
+ (omalley via cutting)
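+
+ A sketch of the corresponding configuration entry (the 64MB value
+ shown is illustrative):
+
+ <property>
+ <name>dfs.block.size</name>
+ <value>67108864</value>
+ </property>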
+
+ 9. HADOOP-218. Avoid accessing configuration while looping through
+ tasks in JobTracker. (Mahadev Konar via cutting)
+
+10. HADOOP-161. Add hashCode() method to DFS's Block.
+ (Milind Bhandarkar via cutting)
+
+11. HADOOP-115. Map output types may now be specified. These are also
+ used as reduce input types, thus permitting reduce input types to
+ differ from reduce output types. (Runping Qi via cutting)
+
+12. HADOOP-216. Add task progress to task status page.
+ (Bryan Pendleton via cutting)
+
+13. HADOOP-233. Add web server to task tracker that shows running
+ tasks and logs. Also add log access to job tracker web interface.
+ (omalley via cutting)
+
+14. HADOOP-205. Incorporate pending tasks into tasktracker load
+ calculations. (Mahadev Konar via cutting)
+
+15. HADOOP-247. Fix sort progress to better handle exceptions.
+ (Mahadev Konar via cutting)
+
+16. HADOOP-195. Improve performance of the transfer of map outputs to
+ reduce nodes by performing multiple transfers in parallel, each on
+ a separate socket. (Sameer Paranjpye via cutting)
+
+17. HADOOP-251. Fix task processes to be tolerant of failed progress
+ reports to their parent process. (omalley via cutting)
+
+18. HADOOP-325. Improve the FileNotFound exceptions thrown by
+ LocalFileSystem to include the name of the file.
+ (Benjamin Reed via cutting)
+
+19. HADOOP-254. Use HTTP to transfer map output data to reduce
+ nodes. This, together with HADOOP-195, greatly improves the
+ performance of these transfers. (omalley via cutting)
+
+20. HADOOP-163. Cause datanodes that are unable to either read or
+ write data to exit, so that the namenode will no longer target
+ them for new blocks and will replicate their data on other nodes.
+ (Hairong Kuang via cutting)
+
+21. HADOOP-222. Add a -setrep option to the dfs commands that alters
+ file replication levels. (Johan Oskarson via cutting)
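+
+ Example invocation (path hypothetical), setting a file's
+ replication to three copies:
+
+ bin/hadoop dfs -setrep 3 /user/alice/data.txt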
+
+22. HADOOP-75. In DFS, only check for a complete file when the file
+ is closed, rather than as each block is written.
+ (Milind Bhandarkar via cutting)
+
+23. HADOOP-124. Change DFS so that datanodes are identified by a
+ persistent ID rather than by host and port. This solves a number
+ of filesystem integrity problems, when, e.g., datanodes are
+ restarted. (Konstantin Shvachko via cutting)
+
+24. HADOOP-256. Add a C API for DFS. (Arun C Murthy via cutting)
+
+25. HADOOP-211. Switch to use the Jakarta Commons logging internally,
+ configured to use log4j by default. (Arun C Murthy and cutting)
+
+26. HADOOP-265. Tasktracker now fails to start if it does not have a
+ writable local directory for temporary files. In this case, it
+ logs a message to the JobTracker and exits. (Hairong Kuang via cutting)
+
+27. HADOOP-270. Fix potential deadlock in datanode shutdown.
+ (Hairong Kuang via cutting)
+
+Release 0.2.1 - 2006-05-12
+
+ 1. HADOOP-199. Fix reduce progress (broken by HADOOP-182).
+ (omalley via cutting)
+
+ 2. HADOOP-201. Fix 'bin/hadoop dfs -report'. (cutting)
+
+ 3. HADOOP-207. Fix JDK 1.4 incompatibility introduced by HADOOP-96.
+ System.getenv() does not work in JDK 1.4. (Hairong Kuang via cutting)
+
+
+Release 0.2.0 - 2006-05-05
+
+ 1. Fix HADOOP-126. 'bin/hadoop dfs -cp' now correctly copies .crc
+ files. (Konstantin Shvachko via cutting)
+
+ 2. Fix HADOOP-51. Change DFS to support per-file replication counts.
+ (Konstantin Shvachko via cutting)
+
+ 3. Fix HADOOP-131. Add scripts to start/stop dfs and mapred daemons.
+ Use these in start/stop-all scripts. (Chris Mattmann via cutting)
+
+ 4. Stop using ssh options by default that are not yet in widely used
+ versions of ssh. Folks can still enable their use by uncommenting
+ a line in conf/hadoop-env.sh. (cutting)
+
+ 5. Fix HADOOP-92. Show information about all attempts to run each
+ task in the web ui. (Mahadev Konar via cutting)
+
+ 6. Fix HADOOP-128. Improved DFS error handling. (Owen O'Malley via cutting)
+
+ 7. Fix HADOOP-129. Replace uses of java.io.File with new class named
+ Path. This fixes bugs where java.io.File methods were called
+ directly when FileSystem methods were desired, and reduces the
+ likelihood of such bugs in the future. It also makes the handling
+ of pathnames more consistent between local and dfs FileSystems and
+ between Windows and Unix. java.io.File-based methods are still
+ available for back-compatibility, but are deprecated and will be
+ removed once 0.2 is released. (cutting)
+
+ 8. Change dfs.data.dir and mapred.local.dir to be comma-separated
+ lists of directories rather than space-separated ones. This fixes
+ several bugs on Windows. (cutting)
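+
+ A sketch of the new comma-separated form (directories
+ hypothetical):
+
+ <property>
+ <name>dfs.data.dir</name>
+ <value>/disk1/dfs/data,/disk2/dfs/data</value>
+ </property>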
+
+ 9. Fix HADOOP-144. Use mapred task id for dfs client id, to
+ facilitate debugging. (omalley via cutting)
+
+10. Fix HADOOP-143. Do not line-wrap stack-traces in web ui.
+ (omalley via cutting)
+
+11. Fix HADOOP-118. In DFS, improve clean up of abandoned file
+ creations. (omalley via cutting)
+
+12. Fix HADOOP-138. Stop multiple tasks in a single heartbeat, rather
+ than one per heartbeat. (Stefan via cutting)
+
+13. Fix HADOOP-139. Remove a potential deadlock in
+ LocalFileSystem.lock(). (Igor Bolotin via cutting)
+
+14. Fix HADOOP-134. Don't hang jobs when the tasktracker is
+ misconfigured to use an un-writable local directory. (omalley via cutting)
+
+15. Fix HADOOP-115. Correct an error message. (Stack via cutting)
+
+16. Fix HADOOP-133. Retry pings from child to parent, in case of
+ (local) communication problems. Also log exit status, so that one
+ can distinguish patricide from other deaths. (omalley via cutting)
+
+17. Fix HADOOP-142. Avoid re-running a task on a host where it has
+ previously failed. (omalley via cutting)
+
+18. Fix HADOOP-148. Maintain a task failure count for each
+ tasktracker and display it in the web ui. (omalley via cutting)
+
+19. Fix HADOOP-151. Close a potential socket leak, where new IPC
+ connection pools were created per configuration instance that RPCs
+ use. Now a global RPC connection pool is used again, as
+ originally intended. (cutting)
+
+20. Fix HADOOP-69. Don't throw a NullPointerException when getting
+ hints for non-existing file split. (Bryan Pendleton via cutting)
+
+21. Fix HADOOP-157. When a task that writes dfs files (e.g., a reduce
+ task) failed and was retried, it would fail again and again,
+ eventually failing the job. The problem was that dfs did not yet
+ know that the failed task had abandoned the files, and would not
+ yet let another task create files with the same names. DFS now
+ retries file creation for long enough that locks on abandoned
+ files can expire. (omalley via cutting)
+
+22. Fix HADOOP-150. Improved task names that include job
+ names. (omalley via cutting)
+
+23. Fix HADOOP-162. Fix ConcurrentModificationException when
+ releasing file locks. (omalley via cutting)
+
+24. Fix HADOOP-132. Initial check-in of new Metrics API, including
+ implementations for writing metric data to a file and for sending
+ it to Ganglia. (David Bowen via cutting)
+
+25. Fix HADOOP-160. Remove some unneeded synchronization around
+ time-consuming operations in the TaskTracker. (omalley via cutting)
+
+26. Fix HADOOP-166. RPCs failed when passed subclasses of a declared
+ parameter type. This is fixed by changing ObjectWritable to store
+ both the declared type and the instance type for Writables. Note
+ that this incompatibly changes the format of ObjectWritable and
+ will render unreadable any ObjectWritables stored in files.
+ Nutch only uses ObjectWritable in intermediate files, so this
+ should not be a problem for Nutch. (Stefan & cutting)
+
+27. Fix HADOOP-168. MapReduce RPC protocol methods should all declare
+ IOException, so that timeouts are handled appropriately.
+ (omalley via cutting)
+
+28. Fix HADOOP-169. Don't fail a reduce task if a call to the
+ jobtracker to locate map outputs fails. (omalley via cutting)
+
+29. Fix HADOOP-170. Permit FileSystem clients to examine and modify
+ the replication count of individual files. Also fix a few
+ replication-related bugs. (Konstantin Shvachko via cutting)
+
+30. Permit specification of a higher replication level for job
+ submission files (job.xml and job.jar). This helps with large
+ clusters, since these files are read by every node. (cutting)
+
+31. HADOOP-173. Optimize allocation of tasks with local data. (cutting)
+
+32. HADOOP-167. Reduce number of Configurations and JobConf's
+ created. (omalley via cutting)
+
+33. NUTCH-256. Change FileSystem#createNewFile() to create a .crc
+ file. The lack of a .crc file was causing warnings. (cutting)
+
+34. HADOOP-174. Change JobClient to not abort job until it has failed
+ to contact the job tracker for five attempts, not just one as
+ before. (omalley via cutting)
+
+35. HADOOP-177. Change MapReduce web interface to page through tasks.
+ Previously, when jobs had more than a few thousand tasks they
+ could crash web browsers. (Mahadev Konar via cutting)
+
+36. HADOOP-178. In DFS, piggyback blockwork requests from datanodes
+ on heartbeat responses from namenode. This reduces the volume of
+ RPC traffic. Also move startup delay in blockwork from datanode
+ to namenode. This fixes a problem where restarting the namenode
+ triggered a lot of unneeded replication. (Hairong Kuang via cutting)
+
+37. HADOOP-183. If the DFS namenode is restarted with different
+ minimum and/or maximum replication counts, existing files'
+ replication counts are now automatically adjusted to be within the
+ newly configured bounds. (Hairong Kuang via cutting)
+
+38. HADOOP-186. Better error handling in TaskTracker's top-level
+ loop. Also improve calculation of time to send next heartbeat.
+ (omalley via cutting)
+
+39. HADOOP-187. Add two MapReduce examples/benchmarks. One creates
+ files containing random data. The second sorts the output of the
+ first. (omalley via cutting)
+
+40. HADOOP-185. Fix so that, when a task tracker times out making the
+ RPC asking for a new task to run, the job tracker does not think
+ that it is actually running the task returned. (omalley via cutting)
+
+41. HADOOP-190. If a child process hangs after it has reported
+ completion, its output should not be lost. (Stack via cutting)
+
+42. HADOOP-184. Re-structure some test code to better support testing
+ on a cluster. (Mahadev Konar via cutting)
+
+43. HADOOP-191. Add streaming package, Hadoop's first contrib module.
+ This permits folks to easily submit MapReduce jobs whose map and
+ reduce functions are implemented by shell commands. Use
+ 'bin/hadoop jar build/hadoop-streaming.jar' to get details.
+ (Michel Tourn via cutting)
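+
+ A minimal sketch of a streaming invocation (paths hypothetical;
+ any shell commands can serve as mapper and reducer):
+
+ bin/hadoop jar build/hadoop-streaming.jar -input /user/alice/in \
+ -output /user/alice/out -mapper /bin/cat -reducer /usr/bin/wc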
+
+44. HADOOP-189. Fix MapReduce in standalone configuration to
+ correctly handle job jar files that contain a lib directory with
+ nested jar files. (cutting)
+
+45. HADOOP-65. Initial version of record I/O framework that enables
+ the specification of record types and generates marshalling code
+ in both Java and C++. Generated Java code implements
+ WritableComparable, but is not yet otherwise used by
+ Hadoop. (Milind Bhandarkar via cutting)
+
+46. HADOOP-193. Add a MapReduce-based FileSystem benchmark.
+ (Konstantin Shvachko via cutting)
+
+47. HADOOP-194. Add a MapReduce-based FileSystem checker. This reads
+ every block in every file in the filesystem. (Konstantin Shvachko
+ via cutting)
+
+48. HADOOP-182. Fix so that lost task trackers do not change the
+ status of reduce tasks or completed jobs. Also fixes the progress
+ meter so that failed tasks are subtracted. (omalley via cutting)
+
+49. HADOOP-96. Logging improvements. Log files are now separate from
+ standard output and standard error files. Logs are now rolled.
+ Logging of all DFS state changes can be enabled, to facilitate
+ debugging. (Hairong Kuang via cutting)
+
+
+Release 0.1.1 - 2006-04-08
+
+ 1. Added CHANGES.txt, logging all significant changes to Hadoop. (cutting)
+
+ 2. Fix MapReduceBase.close() to throw IOException, as declared in the
+ Closeable interface. This permits subclasses which override this
+ method to throw that exception. (cutting)
+
+ 3. Fix HADOOP-117. Pathnames were mistakenly transposed in
+ JobConf.getLocalFile() causing many mapred temporary files to not
+ be removed. (Raghavendra Prabhu via cutting)
+
+ 4. Fix HADOOP-116. Clean up job submission files when jobs complete.
+ (cutting)
+
+ 5. Fix HADOOP-125. Fix handling of absolute paths on Windows. (cutting)
+
+Release 0.1.0 - 2006-04-01
+
+ 1. The first release of Hadoop.
+
diff --git a/lib/hadoop-0.20.0/LICENSE.txt b/lib/hadoop-0.20.0/LICENSE.txt
new file mode 100644
index 0000000000..59bcdbc978
--- /dev/null
+++ b/lib/hadoop-0.20.0/LICENSE.txt
@@ -0,0 +1,244 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+APACHE HADOOP SUBCOMPONENTS:
+
+The Apache Hadoop project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses.
+
+For the org.apache.hadoop.util.bloom.* classes:
+
+/**
+ *
+ * Copyright (c) 2005, European Commission project OneLab under contract
+ * 034819 (http://www.one-lab.org)
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ * - Neither the name of the University Catholique de Louvain - UCL
+ * nor the names of its contributors may be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/lib/hadoop-0.20.0/NOTICE.txt b/lib/hadoop-0.20.0/NOTICE.txt
new file mode 100644
index 0000000000..62fc5816c9
--- /dev/null
+++ b/lib/hadoop-0.20.0/NOTICE.txt
@@ -0,0 +1,2 @@
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
diff --git a/lib/hadoop-0.20.0/README.txt b/lib/hadoop-0.20.0/README.txt
new file mode 100644
index 0000000000..148cd31c86
--- /dev/null
+++ b/lib/hadoop-0.20.0/README.txt
@@ -0,0 +1,31 @@
+For the latest information about Hadoop, please visit our website at:
+
+ http://hadoop.apache.org/core/
+
+and our wiki, at:
+
+ http://wiki.apache.org/hadoop/
+
+This distribution includes cryptographic software. The country in
+which you currently reside may have restrictions on the import,
+possession, use, and/or re-export to another country, of
+encryption software. BEFORE using any encryption software, please
+check your country's laws, regulations and policies concerning the
+import, possession, or use, and re-export of encryption software, to
+see if this is permitted. See <http://www.wassenaar.org/> for more
+information.
+
+The U.S. Government Department of Commerce, Bureau of Industry and
+Security (BIS), has classified this software as Export Commodity
+Control Number (ECCN) 5D002.C.1, which includes information security
+software using or performing cryptographic functions with asymmetric
+algorithms. The form and manner of this Apache Software Foundation
+distribution makes it eligible for export under the License Exception
+ENC Technology Software Unrestricted (TSU) exception (see the BIS
+Export Administration Regulations, Section 740.13) for both object
+code and source code.
+
+The following provides more details on the included cryptographic
+software:
+ Hadoop Core uses the SSL libraries from the Jetty project written
+by mortbay.org.
diff --git a/lib/hadoop-0.20.0/bin/hadoop b/lib/hadoop-0.20.0/bin/hadoop
new file mode 100755
index 0000000000..273549f138
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/hadoop
@@ -0,0 +1,289 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# The Hadoop command script
+#
+# Environment Variables
+#
+# JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+#
+# HADOOP_CLASSPATH Extra Java CLASSPATH entries.
+#
+# HADOOP_HEAPSIZE The maximum amount of heap to use, in MB.
+# Default is 1000.
+#
+# HADOOP_OPTS Extra Java runtime options.
+#
+# HADOOP_NAMENODE_OPTS These options are added to HADOOP_OPTS
+# HADOOP_CLIENT_OPTS when the respective command is run.
+# HADOOP_{COMMAND}_OPTS etc For example, HADOOP_JT_OPTS applies to the
+# JobTracker, while HADOOP_CLIENT_OPTS applies to
+# more than one command (fs, dfs, fsck,
+# dfsadmin, etc.)
+#
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+#
+# HADOOP_ROOT_LOGGER The root appender. Default is INFO,console
+#
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+cygwin=false
+case "`uname`" in
+CYGWIN*) cygwin=true;;
+esac
+
+# if no args specified, show usage
+if [ $# = 0 ]; then
+ echo "Usage: hadoop [--config confdir] COMMAND"
+ echo "where COMMAND is one of:"
+ echo " namenode -format format the DFS filesystem"
+ echo " secondarynamenode run the DFS secondary namenode"
+ echo " namenode run the DFS namenode"
+ echo " datanode run a DFS datanode"
+ echo " dfsadmin run a DFS admin client"
+ echo " mradmin run a Map-Reduce admin client"
+ echo " fsck run a DFS filesystem checking utility"
+ echo " fs run a generic filesystem user client"
+ echo " balancer run a cluster balancing utility"
+ echo " jobtracker run the MapReduce job Tracker node"
+ echo " pipes run a Pipes job"
+ echo " tasktracker run a MapReduce task Tracker node"
+ echo " job manipulate MapReduce jobs"
+ echo " queue get information regarding JobQueues"
+ echo " version print the version"
+ echo " jar <jar> run a jar file"
+ echo " distcp <srcurl> <desturl> copy file or directories recursively"
+ echo " archive -archiveName NAME <src>* <dest> create a hadoop archive"
+ echo " daemonlog get/set the log level for each daemon"
+ echo " or"
+ echo " CLASSNAME run the class named CLASSNAME"
+ echo "Most commands print help when invoked w/o parameters."
+ exit 1
+fi
+
+# get arguments
+COMMAND=$1
+shift
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+# some Java parameters
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# check envvars which might override default args
+if [ "$HADOOP_HEAPSIZE" != "" ]; then
+ #echo "run with heapsize $HADOOP_HEAPSIZE"
+ JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m"
+ #echo $JAVA_HEAP_MAX
+fi
+
+# CLASSPATH initially contains $HADOOP_CONF_DIR
+CLASSPATH="${HADOOP_CONF_DIR}"
+CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
+
+# for developers, add Hadoop classes to CLASSPATH
+if [ -d "$HADOOP_HOME/build/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes
+fi
+if [ -d "$HADOOP_HOME/build/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build
+fi
+if [ -d "$HADOOP_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes
+fi
+if [ -d "$HADOOP_HOME/build/tools" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/tools
+fi
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+# for releases, add core hadoop jar & webapps to CLASSPATH
+if [ -d "$HADOOP_HOME/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME
+fi
+for f in $HADOOP_HOME/hadoop-*-core.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add libs to CLASSPATH
+for f in $HADOOP_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+if [ -d "$HADOOP_HOME/build/ivy/lib/Hadoop/common" ]; then
+for f in $HADOOP_HOME/build/ivy/lib/Hadoop/common/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+fi
+
+for f in $HADOOP_HOME/lib/jsp-2.1/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+for f in $HADOOP_HOME/hadoop-*-tools.jar; do
+ TOOL_PATH=${TOOL_PATH}:$f;
+done
+for f in $HADOOP_HOME/build/hadoop-*-tools.jar; do
+ TOOL_PATH=${TOOL_PATH}:$f;
+done
+
+# add user-specified CLASSPATH last
+if [ "$HADOOP_CLASSPATH" != "" ]; then
+ CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
+fi
+
+# default log directory & file
+if [ "$HADOOP_LOG_DIR" = "" ]; then
+ HADOOP_LOG_DIR="$HADOOP_HOME/logs"
+fi
+if [ "$HADOOP_LOGFILE" = "" ]; then
+ HADOOP_LOGFILE='hadoop.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$HADOOP_POLICYFILE" = "" ]; then
+ HADOOP_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+# figure out which class to run
+if [ "$COMMAND" = "namenode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
+elif [ "$COMMAND" = "secondarynamenode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
+elif [ "$COMMAND" = "datanode" ] ; then
+ CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_DATANODE_OPTS"
+elif [ "$COMMAND" = "fs" ] ; then
+ CLASS=org.apache.hadoop.fs.FsShell
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "dfs" ] ; then
+ CLASS=org.apache.hadoop.fs.FsShell
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "dfsadmin" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "mradmin" ] ; then
+ CLASS=org.apache.hadoop.mapred.tools.MRAdmin
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "fsck" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.DFSck
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "balancer" ] ; then
+ CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
+elif [ "$COMMAND" = "jobtracker" ] ; then
+ CLASS=org.apache.hadoop.mapred.JobTracker
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_JOBTRACKER_OPTS"
+elif [ "$COMMAND" = "tasktracker" ] ; then
+ CLASS=org.apache.hadoop.mapred.TaskTracker
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_TASKTRACKER_OPTS"
+elif [ "$COMMAND" = "job" ] ; then
+ CLASS=org.apache.hadoop.mapred.JobClient
+elif [ "$COMMAND" = "queue" ] ; then
+ CLASS=org.apache.hadoop.mapred.JobQueueClient
+elif [ "$COMMAND" = "pipes" ] ; then
+ CLASS=org.apache.hadoop.mapred.pipes.Submitter
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "version" ] ; then
+ CLASS=org.apache.hadoop.util.VersionInfo
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "jar" ] ; then
+ CLASS=org.apache.hadoop.util.RunJar
+elif [ "$COMMAND" = "distcp" ] ; then
+ CLASS=org.apache.hadoop.tools.DistCp
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "daemonlog" ] ; then
+ CLASS=org.apache.hadoop.log.LogLevel
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "archive" ] ; then
+ CLASS=org.apache.hadoop.tools.HadoopArchives
+ CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "sampler" ] ; then
+ CLASS=org.apache.hadoop.mapred.lib.InputSampler
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+else
+ CLASS=$COMMAND
+fi
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+ HADOOP_HOME=`cygpath -w "$HADOOP_HOME"`
+ HADOOP_LOG_DIR=`cygpath -w "$HADOOP_LOG_DIR"`
+ TOOL_PATH=`cygpath -p -w "$TOOL_PATH"`
+fi
+# setup 'java.library.path' for native-hadoop code if necessary
+JAVA_LIBRARY_PATH=''
+if [ -d "${HADOOP_HOME}/build/native" -o -d "${HADOOP_HOME}/lib/native" ]; then
+ JAVA_PLATFORM=`CLASSPATH=${CLASSPATH} ${JAVA} -Xmx32m org.apache.hadoop.util.PlatformName | sed -e "s/ /_/g"`
+
+ if [ -d "$HADOOP_HOME/build/native" ]; then
+ JAVA_LIBRARY_PATH=${HADOOP_HOME}/build/native/${JAVA_PLATFORM}/lib
+ fi
+
+ if [ -d "${HADOOP_HOME}/lib/native" ]; then
+ if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
+ else
+ JAVA_LIBRARY_PATH=${HADOOP_HOME}/lib/native/${JAVA_PLATFORM}
+ fi
+ fi
+fi
+
+# cygwin path translation
+if $cygwin; then
+ JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
+fi
+
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_HOME"
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE"
+
+# run it
+exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
diff --git a/lib/hadoop-0.20.0/bin/hadoop-config.sh b/lib/hadoop-0.20.0/bin/hadoop-config.sh
new file mode 100755
index 0000000000..1f9d52da79
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/hadoop-config.sh
@@ -0,0 +1,68 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# included in all the hadoop scripts with source command
+# should not be executable directly
+# also should not be passed any arguments, since we need original $*
+
+# resolve links - $0 may be a softlink
+
+this="$0"
+while [ -h "$this" ]; do
+ ls=`ls -ld "$this"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '.*/.*' > /dev/null; then
+ this="$link"
+ else
+ this=`dirname "$this"`/"$link"
+ fi
+done
+
+# convert relative path to absolute path
+bin=`dirname "$this"`
+script=`basename "$this"`
+bin=`cd "$bin"; pwd`
+this="$bin/$script"
+
+# the root of the Hadoop installation
+export HADOOP_HOME=`dirname "$this"`/..
+
+# check to see if the conf dir is given as an optional argument
+if [ $# -gt 1 ]
+then
+ if [ "--config" = "$1" ]
+ then
+ shift
+ confdir=$1
+ shift
+ HADOOP_CONF_DIR=$confdir
+ fi
+fi
+
+# Allow alternate conf dir location.
+HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"
+
+# check to see if a hosts file (slaves or masters) is specified
+# as an optional argument
+if [ $# -gt 1 ]
+then
+ if [ "--hosts" = "$1" ]
+ then
+ shift
+ slavesfile=$1
+ shift
+ export HADOOP_SLAVES="${HADOOP_CONF_DIR}/$slavesfile"
+ fi
+fi
diff --git a/lib/hadoop-0.20.0/bin/hadoop-daemon.sh b/lib/hadoop-0.20.0/bin/hadoop-daemon.sh
new file mode 100755
index 0000000000..e10390a9e2
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/hadoop-daemon.sh
@@ -0,0 +1,143 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs a Hadoop command as a daemon.
+#
+# Environment Variables
+#
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+# HADOOP_LOG_DIR Where log files are stored. PWD by default.
+# HADOOP_MASTER host:path where hadoop code should be rsync'd from
+# HADOOP_PID_DIR Where the pid files are stored. /tmp by default.
+# HADOOP_IDENT_STRING A string representing this instance of hadoop. $USER by default
+# HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: hadoop-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) <hadoop-command> <args...>"
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# get arguments
+startStop=$1
+shift
+command=$1
+shift
+
+hadoop_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+# get log directory
+if [ "$HADOOP_LOG_DIR" = "" ]; then
+ export HADOOP_LOG_DIR="$HADOOP_HOME/logs"
+fi
+mkdir -p "$HADOOP_LOG_DIR"
+
+if [ "$HADOOP_PID_DIR" = "" ]; then
+ HADOOP_PID_DIR=/tmp
+fi
+
+if [ "$HADOOP_IDENT_STRING" = "" ]; then
+ export HADOOP_IDENT_STRING="$USER"
+fi
+
+# some variables
+export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
+export HADOOP_ROOT_LOGGER="INFO,DRFA"
+log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
+pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
+
+# Set default scheduling priority
+if [ "$HADOOP_NICENESS" = "" ]; then
+ export HADOOP_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ mkdir -p "$HADOOP_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo $command running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ if [ "$HADOOP_MASTER" != "" ]; then
+ echo rsync from $HADOOP_MASTER
+ rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_HOME"
+ fi
+
+ hadoop_rotate_log $log
+ echo starting $command, logging to $log
+ cd "$HADOOP_HOME"
+ nohup nice -n $HADOOP_NICENESS "$HADOOP_HOME"/bin/hadoop --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+ echo $! > $pid
+ sleep 1; head "$log"
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo stopping $command
+ kill `cat $pid`
+ else
+ echo no $command to stop
+ fi
+ else
+ echo no $command to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
+
+
diff --git a/lib/hadoop-0.20.0/bin/hadoop-daemons.sh b/lib/hadoop-0.20.0/bin/hadoop-daemons.sh
new file mode 100755
index 0000000000..894d8ab11a
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/hadoop-daemons.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a Hadoop command on all slave hosts.
+
+usage="Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] command args..."
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. $bin/hadoop-config.sh
+
+exec "$bin/slaves.sh" --config $HADOOP_CONF_DIR cd "$HADOOP_HOME" \; "$bin/hadoop-daemon.sh" --config $HADOOP_CONF_DIR "$@"
diff --git a/lib/hadoop-0.20.0/bin/rcc b/lib/hadoop-0.20.0/bin/rcc
new file mode 100755
index 0000000000..a39745be6f
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/rcc
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# The Hadoop record compiler
+#
+# Environment Variables
+#
+# JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+#
+# HADOOP_OPTS Extra Java runtime options.
+#
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+#
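+# e.g. (hypothetical DDL file): bin/rcc --language java mytypes.jr
+#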
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+# some Java parameters
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# CLASSPATH initially contains $HADOOP_CONF_DIR
+CLASSPATH="${HADOOP_CONF_DIR}"
+CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
+
+# for developers, add Hadoop classes to CLASSPATH
+if [ -d "$HADOOP_HOME/build/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/classes
+fi
+if [ -d "$HADOOP_HOME/build/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build
+fi
+if [ -d "$HADOOP_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/test/classes
+fi
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+# for releases, add core hadoop jar & webapps to CLASSPATH
+if [ -d "$HADOOP_HOME/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME
+fi
+for f in $HADOOP_HOME/hadoop-*-core.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add libs to CLASSPATH
+for f in $HADOOP_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+for f in $HADOOP_HOME/lib/jetty-ext/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# restore ordinary behaviour
+unset IFS
+
+CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
+
+# cygwin path translation
+if expr `uname` : 'CYGWIN*' > /dev/null; then
+ CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+fi
+
+# run it
+exec "$JAVA" $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
diff --git a/lib/hadoop-0.20.0/bin/slaves.sh b/lib/hadoop-0.20.0/bin/slaves.sh
new file mode 100755
index 0000000000..fc9f720be7
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/slaves.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a shell command on all slave hosts.
+#
+# Environment Variables
+#
+# HADOOP_SLAVES File naming remote hosts.
+# Default is ${HADOOP_CONF_DIR}/slaves.
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+# HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
+##
+
+usage="Usage: slaves.sh [--config confdir] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# If the slaves file is specified in the command line,
+# then it takes precedence over the definition in
+# hadoop-env.sh. Save it here.
+HOSTLIST=$HADOOP_SLAVES
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+ . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+if [ "$HOSTLIST" = "" ]; then
+ if [ "$HADOOP_SLAVES" = "" ]; then
+ export HOSTLIST="${HADOOP_CONF_DIR}/slaves"
+ else
+ export HOSTLIST="${HADOOP_SLAVES}"
+ fi
+fi
+
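+# note: the ${@// /\\ } expansion escapes spaces in each argument so
+# that arguments survive word-splitting by the remote shell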
+for slave in `cat "$HOSTLIST"|sed "s/#.*$//;/^$/d"`; do
+ ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \
+ 2>&1 | sed "s/^/$slave: /" &
+ if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then
+ sleep $HADOOP_SLAVE_SLEEP
+ fi
+done
+
+wait
diff --git a/lib/hadoop-0.20.0/bin/start-all.sh b/lib/hadoop-0.20.0/bin/start-all.sh
new file mode 100755
index 0000000000..b1eefc8fbe
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/start-all.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start all hadoop daemons. Run this on master node.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# start dfs daemons
+"$bin"/start-dfs.sh --config $HADOOP_CONF_DIR
+
+# start mapred daemons
+"$bin"/start-mapred.sh --config $HADOOP_CONF_DIR
diff --git a/lib/hadoop-0.20.0/bin/start-balancer.sh b/lib/hadoop-0.20.0/bin/start-balancer.sh
new file mode 100755
index 0000000000..e8c93f90ca
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/start-balancer.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# Start balancer daemon.
+
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start balancer $@
diff --git a/lib/hadoop-0.20.0/bin/start-dfs.sh b/lib/hadoop-0.20.0/bin/start-dfs.sh
new file mode 100755
index 0000000000..bda2035a2b
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/start-dfs.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start hadoop dfs daemons.
+# Optionally upgrade or roll back dfs state.
+# Run this on master node.
+
+usage="Usage: start-dfs.sh [-upgrade|-rollback]"
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# get arguments
+if [ $# -ge 1 ]; then
+ nameStartOpt=$1
+ shift
+ case $nameStartOpt in
+ (-upgrade)
+ ;;
+ (-rollback)
+ dataStartOpt=$nameStartOpt
+ ;;
+ (*)
+ echo $usage
+ exit 1
+ ;;
+ esac
+fi
+
+# start dfs daemons
+# start namenode after datanodes, to minimize time namenode is up w/o data
+# note: datanodes will log connection errors until namenode starts
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start namenode $nameStartOpt
+"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start datanode $dataStartOpt
+"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters start secondarynamenode
diff --git a/lib/hadoop-0.20.0/bin/start-mapred.sh b/lib/hadoop-0.20.0/bin/start-mapred.sh
new file mode 100755
index 0000000000..b64c8f51d8
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/start-mapred.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start hadoop map reduce daemons. Run this on master node.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# start mapred daemons
+# start jobtracker first to minimize connection errors at startup
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start jobtracker
+"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start tasktracker
diff --git a/lib/hadoop-0.20.0/bin/stop-all.sh b/lib/hadoop-0.20.0/bin/stop-all.sh
new file mode 100755
index 0000000000..033f2fe8d8
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/stop-all.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop all hadoop daemons. Run this on master node.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+"$bin"/stop-mapred.sh --config $HADOOP_CONF_DIR
+"$bin"/stop-dfs.sh --config $HADOOP_CONF_DIR
diff --git a/lib/hadoop-0.20.0/bin/stop-balancer.sh b/lib/hadoop-0.20.0/bin/stop-balancer.sh
new file mode 100755
index 0000000000..483a9c2549
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/stop-balancer.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# Stop balancer daemon.
+# Run this on the machine where the balancer is running.
+
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop balancer
diff --git a/lib/hadoop-0.20.0/bin/stop-dfs.sh b/lib/hadoop-0.20.0/bin/stop-dfs.sh
new file mode 100755
index 0000000000..14fe61d17e
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/stop-dfs.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop hadoop DFS daemons. Run this on master node.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop namenode
+"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop datanode
+"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters stop secondarynamenode
+
diff --git a/lib/hadoop-0.20.0/bin/stop-mapred.sh b/lib/hadoop-0.20.0/bin/stop-mapred.sh
new file mode 100755
index 0000000000..aa51c1f87b
--- /dev/null
+++ b/lib/hadoop-0.20.0/bin/stop-mapred.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop hadoop map reduce daemons. Run this on master node.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop jobtracker
+"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR stop tasktracker
+
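
All of the start-* and stop-* wrappers above reduce to two primitives: hadoop-daemon.sh manages a single daemon on the local host, while hadoop-daemons.sh runs the same command on every host in the slaves file via slaves.sh. A sketch of the two call shapes (config directory hypothetical):

    bin/hadoop-daemon.sh  --config conf start jobtracker   # this host only
    bin/hadoop-daemons.sh --config conf stop tasktracker   # fans out over the slaves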
diff --git a/lib/hadoop-0.20.0/build.xml b/lib/hadoop-0.20.0/build.xml
new file mode 100644
index 0000000000..68932d7d9f
--- /dev/null
+++ b/lib/hadoop-0.20.0/build.xml
@@ -0,0 +1,1796 @@
+<?xml version="1.0"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<project name="Hadoop" default="compile"
+ xmlns:ivy="antlib:org.apache.ivy.ant">
+
+ <!-- Load all the default properties, and any the user wants -->
+ <!-- to contribute (without having to type -D or edit this file -->
+ <property file="${user.home}/build.properties" />
+ <property file="${basedir}/build.properties" />
+
+ <property name="Name" value="Hadoop"/>
+ <property name="name" value="hadoop"/>
+ <property name="version" value="0.20.1-dev"/>
+ <property name="final.name" value="${name}-${version}"/>
+ <property name="year" value="2009"/>
+
+ <property name="src.dir" value="${basedir}/src"/>
+ <property name="core.src.dir" value="${src.dir}/core"/>
+ <property name="mapred.src.dir" value="${src.dir}/mapred"/>
+ <property name="hdfs.src.dir" value="${src.dir}/hdfs"/>
+ <property name="native.src.dir" value="${basedir}/src/native"/>
+ <property name="examples.dir" value="${basedir}/src/examples"/>
+ <property name="anttasks.dir" value="${basedir}/src/ant"/>
+ <property name="lib.dir" value="${basedir}/lib"/>
+ <property name="conf.dir" value="${basedir}/conf"/>
+ <property name="contrib.dir" value="${basedir}/src/contrib"/>
+ <property name="docs.src" value="${basedir}/src/docs"/>
+ <property name="src.docs.cn" value="${basedir}/src/docs/cn"/>
+ <property name="changes.src" value="${docs.src}/changes"/>
+ <property name="c++.src" value="${basedir}/src/c++"/>
+ <property name="c++.utils.src" value="${c++.src}/utils"/>
+ <property name="c++.pipes.src" value="${c++.src}/pipes"/>
+ <property name="c++.examples.pipes.src" value="${examples.dir}/pipes"/>
+ <property name="c++.libhdfs.src" value="${c++.src}/libhdfs"/>
+ <property name="librecordio.src" value="${c++.src}/librecordio"/>
+ <property name="tools.src" value="${basedir}/src/tools"/>
+
+ <property name="xercescroot" value=""/>
+ <property name="build.dir" value="${basedir}/build"/>
+ <property name="build.classes" value="${build.dir}/classes"/>
+ <property name="build.src" value="${build.dir}/src"/>
+ <property name="build.tools" value="${build.dir}/tools"/>
+ <property name="build.webapps" value="${build.dir}/webapps"/>
+ <property name="build.examples" value="${build.dir}/examples"/>
+ <property name="build.anttasks" value="${build.dir}/ant"/>
+ <property name="build.librecordio" value="${build.dir}/librecordio"/>
+ <!-- convert spaces to _ so that mac os doesn't break things -->
+ <exec executable="sed" inputstring="${os.name}"
+ outputproperty="nonspace.os">
+ <arg value="s/ /_/g"/>
+ </exec>
+ <property name="build.platform"
+ value="${nonspace.os}-${os.arch}-${sun.arch.data.model}"/>
+ <property name="jvm.arch"
+ value="${sun.arch.data.model}"/>
+ <property name="build.native" value="${build.dir}/native/${build.platform}"/>
+ <property name="build.c++" value="${build.dir}/c++-build/${build.platform}"/>
+ <property name="build.c++.utils" value="${build.c++}/utils"/>
+ <property name="build.c++.pipes" value="${build.c++}/pipes"/>
+ <property name="build.c++.libhdfs" value="${build.c++}/libhdfs"/>
+ <property name="build.c++.examples.pipes"
+ value="${build.c++}/examples/pipes"/>
+ <property name="build.docs" value="${build.dir}/docs"/>
+ <property name="build.docs.cn" value="${build.dir}/docs/cn"/>
+ <property name="build.javadoc" value="${build.docs}/api"/>
+ <property name="build.javadoc.dev" value="${build.docs}/dev-api"/>
+ <property name="build.encoding" value="ISO-8859-1"/>
+ <property name="install.c++" value="${build.dir}/c++/${build.platform}"/>
+ <property name="install.c++.examples"
+ value="${build.dir}/c++-examples/${build.platform}"/>
+
+ <property name="test.src.dir" value="${basedir}/src/test"/>
+ <property name="test.lib.dir" value="${basedir}/src/test/lib"/>
+ <property name="test.build.dir" value="${build.dir}/test"/>
+ <property name="test.generated.dir" value="${test.build.dir}/src"/>
+ <property name="test.build.data" value="${test.build.dir}/data"/>
+ <property name="test.cache.data" value="${test.build.dir}/cache"/>
+ <property name="test.debug.data" value="${test.build.dir}/debug"/>
+ <property name="test.log.dir" value="${test.build.dir}/logs"/>
+ <property name="test.build.classes" value="${test.build.dir}/classes"/>
+ <property name="test.build.testjar" value="${test.build.dir}/testjar"/>
+ <property name="test.build.testshell" value="${test.build.dir}/testshell"/>
+ <property name="test.build.extraconf" value="${test.build.dir}/extraconf"/>
+ <property name="test.build.javadoc" value="${test.build.dir}/docs/api"/>
+ <property name="test.build.javadoc.dev" value="${test.build.dir}/docs/dev-api"/>
+ <property name="test.include" value="Test*"/>
+ <property name="test.classpath.id" value="test.classpath"/>
+ <property name="test.output" value="no"/>
+ <property name="test.timeout" value="900000"/>
+ <property name="test.junit.output.format" value="plain"/>
+ <property name="test.junit.fork.mode" value="perTest" />
+ <property name="test.junit.printsummary" value="yes" />
+ <property name="test.junit.haltonfailure" value="no" />
+ <property name="test.junit.maxmemory" value="512m" />
+
+ <property name="test.libhdfs.conf.dir" value="${c++.libhdfs.src}/tests/conf"/>
+ <property name="test.libhdfs.dir" value="${test.build.dir}/libhdfs"/>
+
+ <property name="librecordio.test.dir" value="${test.build.dir}/librecordio"/>
+ <property name="web.src.dir" value="${basedir}/src/web"/>
+ <property name="src.webapps" value="${basedir}/src/webapps"/>
+
+ <property name="javadoc.link.java"
+ value="http://java.sun.com/javase/6/docs/api/"/>
+ <property name="javadoc.packages" value="org.apache.hadoop.*"/>
+
+ <property name="dist.dir" value="${build.dir}/${final.name}"/>
+
+ <property name="javac.debug" value="on"/>
+ <property name="javac.optimize" value="on"/>
+ <property name="javac.deprecation" value="off"/>
+ <property name="javac.version" value="1.6"/>
+ <property name="javac.args" value=""/>
+ <property name="javac.args.warnings" value="-Xlint:unchecked"/>
+
+ <property name="clover.db.dir" location="${build.dir}/test/clover/db"/>
+ <property name="clover.report.dir" location="${build.dir}/test/clover/reports"/>
+
+ <property name="rat.reporting.classname" value="rat.Report"/>
+
+ <property name="jdiff.build.dir" value="${build.docs}/jdiff"/>
+ <property name="jdiff.xml.dir" value="${lib.dir}/jdiff"/>
+ <property name="jdiff.stable" value="0.19.1"/>
+ <property name="jdiff.stable.javadoc"
+ value="http://hadoop.apache.org/core/docs/r${jdiff.stable}/api/"/>
+
+ <property name="scratch.dir" value="${user.home}/tmp"/>
+ <property name="svn.cmd" value="svn"/>
+ <property name="grep.cmd" value="grep"/>
+ <property name="patch.cmd" value="patch"/>
+ <property name="make.cmd" value="make"/>
+
+ <!-- IVY properteis set here -->
+ <property name="ivy.dir" location="ivy" />
+ <loadproperties srcfile="${ivy.dir}/libraries.properties"/>
+ <property name="ivy.jar" location="${ivy.dir}/ivy-${ivy.version}.jar"/>
+ <property name="ivy_repo_url" value="http://repo2.maven.org/maven2/org/apache/ivy/ivy/${ivy.version}/ivy-${ivy.version}.jar"/>
+ <property name="ivysettings.xml" location="${ivy.dir}/ivysettings.xml" />
+ <property name="ivy.org" value="org.apache.hadoop"/>
+ <property name="build.dir" location="build" />
+ <property name="dist.dir" value="${build.dir}/${final.name}"/>
+ <property name="build.ivy.dir" location="${build.dir}/ivy" />
+ <property name="build.ivy.lib.dir" location="${build.ivy.dir}/lib" />
+ <property name="common.ivy.lib.dir" location="${build.ivy.lib.dir}/${ant.project.name}/common"/>
+ <property name="build.ivy.report.dir" location="${build.ivy.dir}/report" />
+ <property name="build.ivy.maven.dir" location="${build.ivy.dir}/maven" />
+ <property name="build.ivy.maven.pom" location="${build.ivy.maven.dir}/hadoop-core-${hadoop.version}.pom" />
+ <property name="build.ivy.maven.jar" location="${build.ivy.maven.dir}/hadoop-core-${hadoop.version}.jar" />
+
+ <!--this is the naming policy for artifacts we want pulled down-->
+ <property name="ivy.artifact.retrieve.pattern" value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/>
+
+ <!--this is how artifacts that get built are named-->
+ <property name="ivy.publish.pattern" value="hadoop-[revision]-core.[ext]"/>
+ <property name="hadoop.jar" location="${build.dir}/hadoop-${hadoop.version}-core.jar" />
+
+ <!-- jdiff.home property set -->
+ <property name="jdiff.home" value="${build.ivy.lib.dir}/${ant.project.name}/jdiff"/>
+ <property name="jdiff.jar" value="${jdiff.home}/jdiff-${jdiff.version}.jar"/>
+ <property name="xerces.jar" value="${jdiff.home}/xerces-${xerces.version}.jar"/>
+
+ <property name="clover.jar" location="${clover.home}/lib/clover.jar"/>
+ <available property="clover.present" file="${clover.jar}" />
+
+ <!-- check if clover reports should be generated -->
+ <condition property="clover.enabled">
+ <and>
+ <isset property="run.clover"/>
+ <isset property="clover.present"/>
+ </and>
+ </condition>
+
+ <!-- the normal classpath -->
+ <path id="classpath">
+ <pathelement location="${build.classes}"/>
+ <fileset dir="${lib.dir}">
+ <include name="**/*.jar" />
+ <exclude name="**/excluded/" />
+ </fileset>
+ <pathelement location="${conf.dir}"/>
+ <path refid="ivy-common.classpath"/>
+ </path>
+
+ <!-- the unit test classpath: uses test.src.dir for configuration -->
+ <path id="test.classpath">
+ <pathelement location="${test.build.extraconf}"/>
+ <pathelement location="${test.build.classes}" />
+ <pathelement location="${test.src.dir}"/>
+ <pathelement location="${build.dir}"/>
+ <pathelement location="${build.examples}"/>
+ <pathelement location="${build.tools}"/>
+ <pathelement path="${clover.jar}"/>
+ <fileset dir="${test.lib.dir}">
+ <include name="**/*.jar" />
+ <exclude name="**/excluded/" />
+ </fileset>
+ <path refid="classpath"/>
+ </path>
+
+ <!-- the cluster test classpath: uses conf.dir for configuration -->
+ <path id="test.cluster.classpath">
+ <path refid="classpath"/>
+ <pathelement location="${test.build.classes}" />
+ <pathelement location="${test.src.dir}"/>
+ <pathelement location="${build.dir}"/>
+ </path>
+
+ <!-- properties dependent on the items defined above. -->
+ <!--<available classname="${rat.reporting.classname}" classpathref="classpath" property="rat.present" value="true"/> -->
+
+ <!-- ====================================================== -->
+ <!-- Macro definitions -->
+ <!-- ====================================================== -->
+ <macrodef name="macro_tar" description="Worker Macro for tar">
+ <attribute name="param.destfile"/>
+ <element name="param.listofitems"/>
+ <sequential>
+ <tar compression="gzip" longfile="gnu"
+ destfile="@{param.destfile}">
+ <param.listofitems/>
+ </tar>
+ </sequential>
+ </macrodef>
+
+ <!-- ====================================================== -->
+ <!-- Stuff needed by all targets -->
+ <!-- ====================================================== -->
+ <target name="init" depends="ivy-retrieve-common">
+ <mkdir dir="${build.dir}"/>
+ <mkdir dir="${build.classes}"/>
+ <mkdir dir="${build.tools}"/>
+ <mkdir dir="${build.src}"/>
+ <mkdir dir="${build.webapps}/task/WEB-INF"/>
+ <mkdir dir="${build.webapps}/job/WEB-INF"/>
+ <mkdir dir="${build.webapps}/hdfs/WEB-INF"/>
+ <mkdir dir="${build.webapps}/datanode/WEB-INF"/>
+ <mkdir dir="${build.webapps}/secondary/WEB-INF"/>
+ <mkdir dir="${build.examples}"/>
+ <mkdir dir="${build.anttasks}"/>
+ <mkdir dir="${build.dir}/c++"/>
+
+ <mkdir dir="${test.build.dir}"/>
+ <mkdir dir="${test.build.classes}"/>
+ <mkdir dir="${test.build.testjar}"/>
+ <mkdir dir="${test.build.testshell}"/>
+ <mkdir dir="${test.build.extraconf}"/>
+ <tempfile property="touch.temp.file" destDir="${java.io.tmpdir}"/>
+ <touch millis="0" file="${touch.temp.file}">
+ <fileset dir="${conf.dir}" includes="**/*.template"/>
+ <fileset dir="${contrib.dir}" includes="**/*.template"/>
+ </touch>
+ <delete file="${touch.temp.file}"/>
+ <!-- copy all of the jsp and static files -->
+ <copy todir="${build.webapps}">
+ <fileset dir="${src.webapps}">
+ <exclude name="**/*.jsp" />
+ </fileset>
+ </copy>
+
+ <copy todir="${conf.dir}" verbose="true">
+ <fileset dir="${conf.dir}" includes="**/*.template"/>
+ <mapper type="glob" from="*.template" to="*"/>
+ </copy>
+
+ <copy todir="${contrib.dir}" verbose="true">
+ <fileset dir="${contrib.dir}" includes="**/*.template"/>
+ <mapper type="glob" from="*.template" to="*"/>
+ </copy>
+
+ <exec executable="sh">
+ <arg line="src/saveVersion.sh ${version}"/>
+ </exec>
+
+ <exec executable="sh">
+ <arg line="src/fixFontsPath.sh ${src.docs.cn}"/>
+ </exec>
+ </target>
+
+ <!-- ====================================================== -->
+ <!-- Compile the Java files -->
+ <!-- ====================================================== -->
+ <target name="record-parser" depends="init" if="javacc.home">
+ <javacc
+ target="${core.src.dir}/org/apache/hadoop/record/compiler/generated/rcc.jj"
+ outputdirectory="${core.src.dir}/org/apache/hadoop/record/compiler/generated"
+ javacchome="${javacc.home}" />
+ </target>
+
+ <target name="compile-rcc-compiler" depends="init, record-parser">
+ <javac
+ encoding="${build.encoding}"
+ srcdir="${core.src.dir}"
+ includes="org/apache/hadoop/record/compiler/**/*.java"
+ destdir="${build.classes}"
+ debug="${javac.debug}"
+ optimize="${javac.optimize}"
+ target="${javac.version}"
+ source="${javac.version}"
+ deprecation="${javac.deprecation}">
+ <compilerarg line="${javac.args}"/>
+ <classpath refid="classpath"/>
+ </javac>
+
+ <taskdef name="recordcc" classname="org.apache.hadoop.record.compiler.ant.RccTask">
+ <classpath refid="classpath" />
+ </taskdef>
+ </target>
+
+ <target name="compile-core-classes" depends="init, compile-rcc-compiler">
+ <taskdef classname="org.apache.jasper.JspC" name="jsp-compile" >
+ <classpath refid="test.classpath"/>
+ </taskdef>
+ <!-- Compile Java files (excluding JSPs) checking warnings -->
+ <javac
+ encoding="${build.encoding}"
+ srcdir="${core.src.dir}"
+ includes="org/apache/hadoop/**/*.java"
+ destdir="${build.classes}"
+ debug="${javac.debug}"
+ optimize="${javac.optimize}"
+ target="${javac.version}"
+ source="${javac.version}"
+ deprecation="${javac.deprecation}">
+ <compilerarg line="${javac.args} ${javac.args.warnings}" />
+ <classpath refid="classpath"/>
+ </javac>
+
+ <copy todir="${build.classes}">
+ <fileset dir="${core.src.dir}" includes="**/*.properties"/>
+ <fileset dir="${core.src.dir}" includes="core-default.xml"/>
+ </copy>
+
+ </target>
+
+ <target name="compile-mapred-classes" depends="compile-core-classes">
+ <jsp-compile
+ uriroot="${src.webapps}/task"
+ outputdir="${build.src}"
+ package="org.apache.hadoop.mapred"
+ webxml="${build.webapps}/task/WEB-INF/web.xml">
+ </jsp-compile>
+
+ <jsp-compile
+ uriroot="${src.webapps}/job"
+ outputdir="${build.src}"
+ package="org.apache.hadoop.mapred"
+ webxml="${build.webapps}/job/WEB-INF/web.xml">
+ </jsp-compile>
+
+ <!-- Compile Java files (excluding JSPs) checking warnings -->
+ <javac
+ encoding="${build.encoding}"
+ srcdir="${mapred.src.dir};${build.src}"
+ includes="org/apache/hadoop/**/*.java"
+ destdir="${build.classes}"
+ debug="${javac.debug}"
+ optimize="${javac.optimize}"
+ target="${javac.version}"
+ source="${javac.version}"
+ deprecation="${javac.deprecation}">
+ <compilerarg line="${javac.args} ${javac.args.warnings}" />
+ <classpath refid="classpath"/>
+ </javac>
+
+ <copy todir="${build.classes}">
+ <fileset dir="${mapred.src.dir}" includes="**/*.properties"/>
+ <fileset dir="${mapred.src.dir}" includes="mapred-default.xml"/>
+ </copy>
+ </target>
+
+ <target name="compile-hdfs-classes" depends="compile-core-classes">
+ <jsp-compile
+ uriroot="${src.webapps}/hdfs"
+ outputdir="${build.src}"
+ package="org.apache.hadoop.hdfs.server.namenode"
+ webxml="${build.webapps}/hdfs/WEB-INF/web.xml">
+ </jsp-compile>
+
+ <jsp-compile
+ uriroot="${src.webapps}/datanode"
+ outputdir="${build.src}"
+ package="org.apache.hadoop.hdfs.server.datanode"
+ webxml="${build.webapps}/datanode/WEB-INF/web.xml">
+ </jsp-compile>
+
+ <!-- Compile Java files (excluding JSPs) checking warnings -->
+ <javac
+ encoding="${build.encoding}"
+ srcdir="${hdfs.src.dir};${build.src}"
+ includes="org/apache/hadoop/**/*.java"
+ destdir="${build.classes}"
+ debug="${javac.debug}"
+ optimize="${javac.optimize}"
+ target="${javac.version}"
+ source="${javac.version}"
+ deprecation="${javac.deprecation}">
+ <compilerarg line="${javac.args} ${javac.args.warnings}" />
+ <classpath refid="classpath"/>
+ </javac>
+
+ <copy todir="${build.classes}">
+ <fileset dir="${hdfs.src.dir}" includes="**/*.properties"/>
+ <fileset dir="${hdfs.src.dir}" includes="hdfs-default.xml"/>
+ </copy>
+ </target>
+
+ <target name="compile-tools" depends="init">
+ <javac
+ encoding="${build.encoding}"
+ srcdir="${tools.src}"
+ includes="org/apache/hadoop/**/*.java"
+ destdir="${build.tools}"
+ debug="${javac.debug}"
+ optimize="${javac.optimize}"
+ target="${javac.version}"
+ source="${javac.version}"
+ deprecation="${javac.deprecation}">
+ <compilerarg line="${javac.args} ${javac.args.warnings}" />
+ <classpath refid="classpath"/>
+ </javac>
+
+ <copy todir="${build.tools}">
+ <fileset
+ dir="${tools.src}"
+ includes="**/*.properties"
+ />
+ </copy>
+ </target>
+
+ <target name="compile-native">
+ <antcall target="compile-core-native">
+ <param name="compile.native" value="true"/>
+ </antcall>
+ </target>
+
+ <target name="compile-core-native" depends="compile-core-classes"
+ if="compile.native">
+
+ <mkdir dir="${build.native}/lib"/>
+ <mkdir dir="${build.native}/src/org/apache/hadoop/io/compress/zlib"/>
+
+ <javah
+ classpath="${build.classes}"
+ destdir="${build.native}/src/org/apache/hadoop/io/compress/zlib"
+ force="yes"
+ verbose="yes"
+ >
+ <class name="org.apache.hadoop.io.compress.zlib.ZlibCompressor" />
+ <class name="org.apache.hadoop.io.compress.zlib.ZlibDecompressor" />
+ </javah>
+
+ <exec dir="${build.native}" executable="sh" failonerror="true">
+ <env key="OS_NAME" value="${os.name}"/>
+ <env key="OS_ARCH" value="${os.arch}"/>
+ <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/>
+ <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/>
+ <arg line="${native.src.dir}/configure"/>
+ </exec>
+
+ <exec dir="${build.native}" executable="${make.cmd}" failonerror="true">
+ <env key="OS_NAME" value="${os.name}"/>
+ <env key="OS_ARCH" value="${os.arch}"/>
+ <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/>
+ <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/>
+ </exec>
+
+ <exec dir="${build.native}" executable="sh" failonerror="true">
+ <arg line="${build.native}/libtool --mode=install cp ${build.native}/lib/libhadoop.la ${build.native}/lib"/>
+ </exec>
+
+ </target>
+
+ <target name="compile-core"
+ depends="clover,compile-core-classes,compile-mapred-classes,
+ compile-hdfs-classes,compile-core-native,compile-c++"
+ description="Compile core only">
+ </target>
+
+ <target name="compile-contrib" depends="compile-core,compile-c++-libhdfs">
+ <subant target="compile">
+ <property name="version" value="${version}"/>
+ <fileset file="${contrib.dir}/build.xml"/>
+ </subant>
+ </target>
+
+ <target name="compile" depends="compile-core, compile-contrib, compile-ant-tasks, compile-tools" description="Compile core, contrib">
+ </target>
+
+ <target name="compile-examples"
+ depends="compile-core,compile-tools,compile-c++-examples">
+ <javac
+ encoding="${build.encoding}"
+ srcdir="${examples.dir}"
+ includes="org/apache/hadoop/**/*.java"
+ destdir="${build.examples}"
+ debug="${javac.debug}"
+ optimize="${javac.optimize}"
+ target="${javac.version}"
+ source="${javac.version}"
+ deprecation="${javac.deprecation}">
+ <compilerarg line="${javac.args} ${javac.args.warnings}" />
+ <classpath>
+ <path refid="classpath"/>
+ <pathelement location="${build.tools}"/>
+ </classpath>
+ </javac>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Make hadoop.jar -->
+ <!-- ================================================================== -->
+ <!-- -->
+ <!-- ================================================================== -->
+ <target name="jar" depends="compile-core" description="Make hadoop.jar">
+ <tar compression="gzip" destfile="${build.classes}/bin.tgz">
+ <tarfileset dir="bin" mode="755"/>
+ </tar>
+ <jar jarfile="${build.dir}/${final.name}-core.jar"
+ basedir="${build.classes}">
+ <manifest>
+ <section name="org/apache/hadoop">
+ <attribute name="Implementation-Title" value="Hadoop"/>
+ <attribute name="Implementation-Version" value="${version}"/>
+ <attribute name="Implementation-Vendor" value="Apache"/>
+ </section>
+ </manifest>
+ <fileset file="${conf.dir}/commons-logging.properties"/>
+ <fileset file="${conf.dir}/log4j.properties"/>
+ <fileset file="${conf.dir}/hadoop-metrics.properties"/>
+ <zipfileset dir="${build.webapps}" prefix="webapps"/>
+ </jar>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Make the Hadoop examples jar. -->
+ <!-- ================================================================== -->
+ <!-- -->
+ <!-- ================================================================== -->
+ <target name="examples" depends="jar, compile-examples" description="Make the Hadoop examples jar.">
+ <jar jarfile="${build.dir}/${final.name}-examples.jar"
+ basedir="${build.examples}">
+ <manifest>
+ <attribute name="Main-Class"
+ value="org/apache/hadoop/examples/ExampleDriver"/>
+ </manifest>
+ </jar>
+ </target>
+
+ <target name="tools-jar" depends="jar, compile-tools"
+ description="Make the Hadoop tools jar.">
+ <jar jarfile="${build.dir}/${final.name}-tools.jar"
+ basedir="${build.tools}">
+ <manifest>
+ <attribute name="Main-Class"
+ value="org/apache/hadoop/examples/ExampleDriver"/>
+ </manifest>
+ </jar>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Make the Hadoop metrics jar. (for use outside Hadoop) -->
+ <!-- ================================================================== -->
+ <!-- -->
+ <!-- ================================================================== -->
+ <target name="metrics.jar" depends="compile-core" description="Make the Hadoop metrics jar. (for use outside Hadoop)">
+ <jar jarfile="${build.dir}/hadoop-metrics-${version}.jar"
+ basedir="${build.classes}">
+ <include name="**/metrics/**" />
+ <exclude name="**/package.html" />
+ </jar>
+ </target>
+
+ <target name="generate-test-records" depends="compile-rcc-compiler">
+ <recordcc destdir="${test.generated.dir}">
+ <fileset dir="${test.src.dir}"
+ includes="**/*.jr" />
+ </recordcc>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Compile test code -->
+ <!-- ================================================================== -->
+ <target name="compile-core-test" depends="compile-examples, compile-tools, generate-test-records">
+ <javac
+ encoding="${build.encoding}"
+ srcdir="${test.generated.dir}"
+ includes="org/apache/hadoop/**/*.java"
+ destdir="${test.build.classes}"
+ debug="${javac.debug}"
+ optimize="${javac.optimize}"
+ target="${javac.version}"
+ source="${javac.version}"
+ deprecation="${javac.deprecation}">
+ <compilerarg line="${javac.args}" />
+ <classpath refid="test.classpath"/>
+ </javac>
+ <javac
+ encoding="${build.encoding}"
+ srcdir="${test.src.dir}"
+ includes="org/apache/hadoop/**/*.java"
+ destdir="${test.build.classes}"
+ debug="${javac.debug}"
+ optimize="${javac.optimize}"
+ target="${javac.version}"
+ source="${javac.version}"
+ deprecation="${javac.deprecation}">
+ <compilerarg line="${javac.args} ${javac.args.warnings}" />
+ <classpath refid="test.classpath"/>
+ </javac>
+ <javac
+ encoding="${build.encoding}"
+ srcdir="${test.src.dir}/testjar"
+ includes="*.java"
+ destdir="${test.build.testjar}"
+ debug="${javac.debug}"
+ optimize="${javac.optimize}"
+ target="${javac.version}"
+ source="${javac.version}"
+ deprecation="${javac.deprecation}">
+ <compilerarg line="${javac.args} ${javac.args.warnings}" />
+ <classpath refid="test.classpath"/>
+ </javac>
+ <delete file="${test.build.testjar}/testjob.jar"/>
+ <jar jarfile="${test.build.testjar}/testjob.jar"
+ basedir="${test.build.testjar}">
+ </jar>
+ <javac
+ encoding="${build.encoding}"
+ srcdir="${test.src.dir}/testshell"
+ includes="*.java"
+ destdir="${test.build.testshell}"
+ debug="${javac.debug}"
+ optimize="${javac.optimize}"
+ target="${javac.version}"
+ source="${javac.version}"
+ deprecation="${javac.deprecation}">
+ <compilerarg line="${javac.args} ${javac.args.warnings}"/>
+ <classpath refid="test.classpath"/>
+ </javac>
+ <delete file="${test.build.testshell}/testshell.jar"/>
+ <jar jarfile="${test.build.testshell}/testshell.jar"
+ basedir="${test.build.testshell}">
+ </jar>
+
+ <delete dir="${test.cache.data}"/>
+ <mkdir dir="${test.cache.data}"/>
+ <delete dir="${test.debug.data}"/>
+ <mkdir dir="${test.debug.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/mapred/testscript.txt" todir="${test.debug.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.txt" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.jar" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.zip" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tar" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tgz" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.tar.gz" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/hdfs/hadoop-14-dfs-dir.tgz" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/cli/testConf.xml" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data15bytes" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data30bytes" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data60bytes" todir="${test.cache.data}"/>
+ <copy file="${test.src.dir}/org/apache/hadoop/cli/clitest_data/data120bytes" todir="${test.cache.data}"/>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Make hadoop-test.jar -->
+ <!-- ================================================================== -->
+ <!-- -->
+ <!-- ================================================================== -->
+ <target name="jar-test" depends="compile-core-test" description="Make hadoop-test.jar">
+ <jar jarfile="${build.dir}/${final.name}-test.jar"
+ basedir="${test.build.classes}">
+ <manifest>
+ <attribute name="Main-Class"
+ value="org/apache/hadoop/test/AllTestDriver"/>
+ <section name="org/apache/hadoop">
+ <attribute name="Implementation-Title" value="Hadoop"/>
+ <attribute name="Implementation-Version" value="${version}"/>
+ <attribute name="Implementation-Vendor" value="Apache"/>
+ </section>
+ </manifest>
+ </jar>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Run unit tests -->
+ <!-- ================================================================== -->
+ <target name="test-core" depends="jar-test" description="Run core unit tests">
+
+ <delete dir="${test.build.data}"/>
+ <mkdir dir="${test.build.data}"/>
+ <delete dir="${test.log.dir}"/>
+ <mkdir dir="${test.log.dir}"/>
+ <copy file="${test.src.dir}/hadoop-policy.xml"
+ todir="${test.build.extraconf}" />
+ <junit showoutput="${test.output}"
+ printsummary="${test.junit.printsummary}"
+ haltonfailure="${test.junit.haltonfailure}"
+ fork="yes"
+ forkmode="${test.junit.fork.mode}"
+ maxmemory="${test.junit.maxmemory}"
+ dir="${basedir}" timeout="${test.timeout}"
+ errorProperty="tests.failed" failureProperty="tests.failed">
+ <sysproperty key="test.build.data" value="${test.build.data}"/>
+ <sysproperty key="test.cache.data" value="${test.cache.data}"/>
+ <sysproperty key="test.debug.data" value="${test.debug.data}"/>
+ <sysproperty key="hadoop.log.dir" value="${test.log.dir}"/>
+ <sysproperty key="test.src.dir" value="${test.src.dir}"/>
+ <sysproperty key="test.build.extraconf" value="${test.build.extraconf}" />
+ <sysproperty key="hadoop.policy.file" value="hadoop-policy.xml"/>
+ <sysproperty key="java.library.path"
+ value="${build.native}/lib:${lib.dir}/native/${build.platform}"/>
+ <sysproperty key="install.c++.examples" value="${install.c++.examples}"/>
+ <!-- set compile.c++ in the child jvm only if it is set -->
+ <syspropertyset dynamic="no">
+ <propertyref name="compile.c++"/>
+ </syspropertyset>
+ <classpath refid="${test.classpath.id}"/>
+ <formatter type="${test.junit.output.format}" />
+ <batchtest todir="${test.build.dir}" unless="testcase">
+ <fileset dir="${test.src.dir}"
+ includes="**/${test.include}.java"
+ excludes="**/${test.exclude}.java" />
+ </batchtest>
+ <batchtest todir="${test.build.dir}" if="testcase">
+ <fileset dir="${test.src.dir}" includes="**/${testcase}.java"/>
+ </batchtest>
+ </junit>
+ <fail if="tests.failed">Tests failed!</fail>
+ </target>
+
+ <target name="test-contrib" depends="compile, compile-core-test" description="Run contrib unit tests">
+ <subant target="test">
+ <property name="version" value="${version}"/>
+ <fileset file="${contrib.dir}/build.xml"/>
+ </subant>
+ </target>
+
+ <target name="test" depends="test-core, test-contrib" description="Run core, contrib unit tests">
+ </target>
+
+ <!-- Run all unit tests, not just Test*, and use non-test configuration. -->
+ <target name="test-cluster" description="Run all unit tests, not just Test*, and use non-test configuration.">
+ <antcall target="test">
+ <param name="test.include" value="*"/>
+ <param name="test.classpath.id" value="test.cluster.classpath"/>
+ </antcall>
+ </target>
+
+ <target name="nightly" depends="test, tar">
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Run optional third-party tool targets -->
+ <!-- ================================================================== -->
+ <target name="checkstyle" depends="ivy-retrieve-checkstyle,check-for-checkstyle" if="checkstyle.present" description="Run optional third-party tool targets">
+ <taskdef resource="checkstyletask.properties">
+ <classpath refid="checkstyle-classpath"/>
+ </taskdef>
+
+ <mkdir dir="${test.build.dir}"/>
+
+ <checkstyle config="${test.src.dir}/checkstyle.xml"
+ failOnViolation="false">
+ <fileset dir="${core.src.dir}" includes="**/*.java" excludes="**/generated/**"/>
+ <fileset dir="${mapred.src.dir}" includes="**/*.java" excludes="**/generated/**"/>
+ <fileset dir="${hdfs.src.dir}" includes="**/*.java" excludes="**/generated/**"/>
+ <formatter type="xml" toFile="${test.build.dir}/checkstyle-errors.xml"/>
+ </checkstyle>
+
+ <xslt style="${test.src.dir}/checkstyle-noframes-sorted.xsl"
+ in="${test.build.dir}/checkstyle-errors.xml"
+ out="${test.build.dir}/checkstyle-errors.html"/>
+ </target>
+
+ <target name="check-for-checkstyle">
+ <available property="checkstyle.present" resource="checkstyletask.properties">
+ <classpath refid="checkstyle-classpath"/>
+ </available>
+ </target>
+
+ <property name="findbugs.home" value=""/>
+ <target name="findbugs" depends="check-for-findbugs, tar" if="findbugs.present" description="Run findbugs if present">
+ <property name="findbugs.out.dir" value="${test.build.dir}/findbugs"/>
+ <property name="findbugs.exclude.file" value="${test.src.dir}/findbugsExcludeFile.xml"/>
+ <property name="findbugs.report.htmlfile" value="${findbugs.out.dir}/hadoop-findbugs-report.html"/>
+ <property name="findbugs.report.xmlfile" value="${findbugs.out.dir}/hadoop-findbugs-report.xml"/>
+ <taskdef name="findbugs" classname="edu.umd.cs.findbugs.anttask.FindBugsTask"
+ classpath="${findbugs.home}/lib/findbugs-ant.jar" />
+
+ <mkdir dir="${findbugs.out.dir}"/>
+
+ <findbugs home="${findbugs.home}" output="xml:withMessages"
+ outputFile="${findbugs.report.xmlfile}" effort="max"
+ excludeFilter="${findbugs.exclude.file}" jvmargs="-Xmx512M">
+ <auxClasspath>
+ <fileset dir="${lib.dir}">
+ <include name="**/*.jar"/>
+ </fileset>
+ <fileset dir="${build.ivy.lib.dir}/${ant.project.name}/common">
+ <include name="**/*.jar"/>
+ </fileset>
+ </auxClasspath>
+ <sourcePath path="${core.src.dir}"/>
+ <sourcePath path="${mapred.src.dir}"/>
+ <sourcePath path="${hdfs.src.dir}"/>
+ <sourcePath path="${examples.dir}" />
+ <sourcePath path="${tools.src}" />
+ <sourcePath path="${basedir}/src/contrib/streaming/src/java" />
+ <class location="${basedir}/build/${final.name}-core.jar" />
+ <class location="${basedir}/build/${final.name}-examples.jar" />
+ <class location="${basedir}/build/${final.name}-tools.jar" />
+ <class location="${basedir}/build/contrib/streaming/${final.name}-streaming.jar" />
+ </findbugs>
+
+ <xslt style="${findbugs.home}/src/xsl/default.xsl"
+ in="${findbugs.report.xmlfile}"
+ out="${findbugs.report.htmlfile}"/>
+ </target>
+
+ <target name="check-for-findbugs">
+ <available property="findbugs.present"
+ file="${findbugs.home}/lib/findbugs.jar" />
+ </target>
+
+
+ <!-- ================================================================== -->
+ <!-- Documentation -->
+ <!-- ================================================================== -->
+
+ <target name="docs" depends="forrest.check" description="Generate forrest-based documentation. To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line." if="forrest.home">
+ <exec dir="${docs.src}" executable="${forrest.home}/bin/forrest"
+ failonerror="true">
+ <env key="JAVA_HOME" value="${java5.home}"/>
+ </exec>
+ <copy todir="${build.docs}">
+ <fileset dir="${docs.src}/build/site/" />
+ </copy>
+ <copy file="${docs.src}/releasenotes.html" todir="${build.docs}"/>
+ <style basedir="${core.src.dir}" destdir="${build.docs}"
+ includes="core-default.xml" style="conf/configuration.xsl"/>
+ <style basedir="${hdfs.src.dir}" destdir="${build.docs}"
+ includes="hdfs-default.xml" style="conf/configuration.xsl"/>
+ <style basedir="${mapred.src.dir}" destdir="${build.docs}"
+ includes="mapred-default.xml" style="conf/configuration.xsl"/>
+ <antcall target="changes-to-html"/>
+ <antcall target="cn-docs"/>
+ </target>
+
+ <target name="cn-docs" depends="forrest.check, init"
+ description="Generate forrest-based Chinese documentation. To use, specify -Dforrest.home=&lt;base of Apache Forrest installation&gt; on the command line."
+ if="forrest.home">
+ <exec dir="${src.docs.cn}" executable="${forrest.home}/bin/forrest" failonerror="true">
+ <env key="LANG" value="en_US.utf8"/>
+ <env key="JAVA_HOME" value="${java5.home}"/>
+ </exec>
+ <copy todir="${build.docs.cn}">
+ <fileset dir="${src.docs.cn}/build/site/" />
+ </copy>
+ <style basedir="${core.src.dir}" destdir="${build.docs.cn}"
+ includes="core-default.xml" style="conf/configuration.xsl"/>
+ <style basedir="${hdfs.src.dir}" destdir="${build.docs.cn}"
+ includes="hdfs-default.xml" style="conf/configuration.xsl"/>
+ <style basedir="${mapred.src.dir}" destdir="${build.docs.cn}"
+ includes="mapred-default.xml" style="conf/configuration.xsl"/>
+ <antcall target="changes-to-html"/>
+ </target>
+
+ <target name="forrest.check" unless="forrest.home" depends="java5.check">
+ <fail message="'forrest.home' is not defined. Please pass -Dforrest.home=&lt;base of Apache Forrest installation&gt; to Ant on the command-line." />
+ </target>
+
+ <target name="java5.check" unless="java5.home">
+ <fail message="'java5.home' is not defined. Forrest requires Java 5. Please pass -Djava5.home=&lt;base of Java 5 distribution&gt; to Ant on the command-line." />
+ </target>
+
+ <target name="javadoc-dev" description="Generate javadoc for hadoop developers">
+ <mkdir dir="${build.javadoc.dev}"/>
+ <javadoc
+ overview="${core.src.dir}/overview.html"
+ packagenames="org.apache.hadoop.*"
+ destdir="${build.javadoc.dev}"
+ author="true"
+ version="true"
+ use="true"
+ windowtitle="${Name} ${version} API"
+ doctitle="${Name} ${version} Developer API"
+ bottom="Copyright &amp;copy; ${year} The Apache Software Foundation"
+ >
+ <packageset dir="${core.src.dir}"/>
+ <packageset dir="${mapred.src.dir}"/>
+ <packageset dir="${hdfs.src.dir}"/>
+ <packageset dir="${examples.dir}"/>
+
+ <packageset dir="src/contrib/streaming/src/java"/>
+ <packageset dir="src/contrib/data_join/src/java"/>
+ <packageset dir="src/contrib/index/src/java"/>
+
+ <link href="${javadoc.link.java}"/>
+
+ <classpath >
+ <path refid="classpath" />
+ <fileset dir="src/contrib/">
+ <include name="*/lib/*.jar" />
+ </fileset>
+ <pathelement path="${java.class.path}"/>
+ </classpath>
+
+ <group title="Core" packages="org.apache.*"/>
+ <group title="Examples" packages="org.apache.hadoop.examples*"/>
+
+ <group title="contrib: Streaming" packages="org.apache.hadoop.streaming*"/>
+ <group title="contrib: DataJoin" packages="org.apache.hadoop.contrib.utils.join*"/>
+ <group title="contrib: Index" packages="org.apache.hadoop.contrib.index*"/>
+
+ </javadoc>
+ </target>
+
+ <target name="javadoc" depends="compile, ivy-retrieve-javadoc" description="Generate javadoc">
+ <mkdir dir="${build.javadoc}"/>
+ <javadoc
+ overview="${core.src.dir}/overview.html"
+ packagenames="org.apache.hadoop.*"
+ destdir="${build.javadoc}"
+ author="true"
+ version="true"
+ use="true"
+ windowtitle="${Name} ${version} API"
+ doctitle="${Name} ${version} API"
+ bottom="Copyright &amp;copy; ${year} The Apache Software Foundation"
+ >
+ <packageset dir="${core.src.dir}"/>
+ <packageset dir="${mapred.src.dir}"/>
+ <packageset dir="${examples.dir}"/>
+
+ <packageset dir="src/contrib/streaming/src/java"/>
+ <packageset dir="src/contrib/data_join/src/java"/>
+ <packageset dir="src/contrib/index/src/java"/>
+ <packageset dir="src/contrib/failmon/src/java/"/>
+
+ <link href="${javadoc.link.java}"/>
+
+ <classpath >
+ <path refid="classpath" />
+ <fileset dir="src/contrib/">
+ <include name="*/lib/*.jar" />
+ </fileset>
+ <path refid="javadoc-classpath"/>
+ <pathelement path="${java.class.path}"/>
+ <pathelement location="${build.tools}"/>
+ </classpath>
+
+ <group title="Core" packages="org.apache.*"/>
+ <group title="Examples" packages="org.apache.hadoop.examples*"/>
+
+ <group title="contrib: Streaming" packages="org.apache.hadoop.streaming*"/>
+ <group title="contrib: DataJoin" packages="org.apache.hadoop.contrib.utils.join*"/>
+ <group title="contrib: Index" packages="org.apache.hadoop.contrib.index*"/>
+ <group title="contrib: FailMon" packages="org.apache.hadoop.contrib.failmon*"/>
+ </javadoc>
+ </target>
+
+ <target name="api-xml" depends="ivy-retrieve-jdiff,javadoc,write-null">
+ <javadoc>
+ <doclet name="jdiff.JDiff"
+ path="${jdiff.jar}:${xerces.jar}">
+ <param name="-apidir" value="${jdiff.xml.dir}"/>
+ <param name="-apiname" value="hadoop ${version}"/>
+ </doclet>
+ <packageset dir="src/core"/>
+ <packageset dir="src/mapred"/>
+ <packageset dir="src/tools"/>
+ <classpath >
+ <path refid="classpath" />
+ <path refid="jdiff-classpath" />
+ <pathelement path="${java.class.path}"/>
+ </classpath>
+ </javadoc>
+ </target>
+
+ <target name="write-null">
+ <exec executable="touch">
+ <arg value="${jdiff.home}/Null.java"/>
+ </exec>
+ </target>
+
+ <target name="api-report" depends="ivy-retrieve-jdiff,api-xml">
+ <mkdir dir="${jdiff.build.dir}"/>
+ <javadoc sourcepath="src/core,src/hdfs,src,mapred,src/tools"
+ destdir="${jdiff.build.dir}"
+ sourceFiles="${jdiff.home}/Null.java">
+ <doclet name="jdiff.JDiff"
+ path="${jdiff.jar}:${xerces.jar}">
+ <param name="-oldapi" value="hadoop ${jdiff.stable}"/>
+ <param name="-newapi" value="hadoop ${version}"/>
+ <param name="-oldapidir" value="${jdiff.xml.dir}"/>
+ <param name="-newapidir" value="${jdiff.xml.dir}"/>
+ <param name="-javadocold" value="${jdiff.stable.javadoc}"/>
+ <param name="-javadocnew" value="../../api/"/>
+ <param name="-stats"/>
+ </doclet>
+ <classpath >
+ <path refid="classpath" />
+ <path refid="jdiff-classpath"/>
+ <pathelement path="${java.class.path}"/>
+ </classpath>
+ </javadoc>
+ </target>
+
+ <target name="changes-to-html" description="Convert CHANGES.txt into an html file">
+ <mkdir dir="${build.docs}"/>
+ <exec executable="perl" input="CHANGES.txt" output="${build.docs}/changes.html" failonerror="true">
+ <arg value="${changes.src}/changes2html.pl"/>
+ </exec>
+ <copy todir="${build.docs}">
+ <fileset dir="${changes.src}" includes="*.css"/>
+ </copy>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- D I S T R I B U T I O N -->
+ <!-- ================================================================== -->
+ <!-- -->
+ <!-- ================================================================== -->
+ <target name="package" depends="compile, jar, javadoc, docs, cn-docs, api-report, examples, tools-jar, jar-test, ant-tasks, package-librecordio"
+ description="Build distribution">
+ <mkdir dir="${dist.dir}"/>
+ <mkdir dir="${dist.dir}/lib"/>
+ <mkdir dir="${dist.dir}/contrib"/>
+ <mkdir dir="${dist.dir}/bin"/>
+ <mkdir dir="${dist.dir}/docs"/>
+ <mkdir dir="${dist.dir}/docs/api"/>
+ <mkdir dir="${dist.dir}/docs/jdiff"/>
+
+ <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true">
+ <fileset dir="${common.ivy.lib.dir}"/>
+ </copy>
+
+ <copy todir="${dist.dir}/lib" includeEmptyDirs="false">
+ <fileset dir="lib">
+ <exclude name="**/native/**"/>
+ </fileset>
+ </copy>
+
+ <exec dir="${dist.dir}" executable="sh" failonerror="true">
+ <env key="BASE_NATIVE_LIB_DIR" value="${lib.dir}/native"/>
+ <env key="BUILD_NATIVE_DIR" value="${build.dir}/native"/>
+ <env key="DIST_LIB_DIR" value="${dist.dir}/lib/native"/>
+ <arg line="${native.src.dir}/packageNativeHadoop.sh"/>
+ </exec>
+
+ <subant target="package">
+ <!--Pass down the version in case its needed again and the target
+ distribution directory so contribs know where to install to.-->
+ <property name="version" value="${version}"/>
+ <property name="dist.dir" value="${dist.dir}"/>
+ <fileset file="${contrib.dir}/build.xml"/>
+ </subant>
+
+ <copy todir="${dist.dir}/webapps">
+ <fileset dir="${build.webapps}"/>
+ </copy>
+
+ <copy todir="${dist.dir}">
+ <fileset file="${build.dir}/${final.name}-*.jar"/>
+ </copy>
+
+ <copy todir="${dist.dir}/bin">
+ <fileset dir="bin"/>
+ </copy>
+
+ <copy todir="${dist.dir}/conf">
+ <fileset dir="${conf.dir}" excludes="**/*.template"/>
+ </copy>
+
+ <copy todir="${dist.dir}/docs">
+ <fileset dir="${build.docs}"/>
+ </copy>
+
+ <copy file="ivy.xml" tofile="${dist.dir}/ivy.xml"/>
+
+ <copy todir="${dist.dir}/ivy">
+ <fileset dir="ivy"/>
+ </copy>
+
+ <copy todir="${dist.dir}">
+ <fileset dir=".">
+ <include name="*.txt" />
+ </fileset>
+ </copy>
+
+ <copy todir="${dist.dir}/src" includeEmptyDirs="true">
+ <fileset dir="src" excludes="**/*.template **/docs/build/**/*"/>
+ </copy>
+
+ <copy todir="${dist.dir}/c++" includeEmptyDirs="false">
+ <fileset dir="${build.dir}/c++"/>
+ </copy>
+
+ <copy todir="${dist.dir}/" file="build.xml"/>
+
+ <chmod perm="ugo+x" type="file" parallel="false">
+ <fileset dir="${dist.dir}/bin"/>
+ <fileset dir="${dist.dir}/src/contrib/">
+ <include name="*/bin/*" />
+ </fileset>
+ <fileset dir="${dist.dir}/src/contrib/ec2/bin/image"/>
+ </chmod>
+ <chmod perm="ugo+x" type="file">
+ <fileset dir="${dist.dir}/src/c++/pipes/debug"/>
+ </chmod>
+
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Make release tarball -->
+ <!-- ================================================================== -->
+ <target name="tar" depends="package" description="Make release tarball">
+ <macro_tar param.destfile="${build.dir}/${final.name}.tar.gz">
+ <param.listofitems>
+ <tarfileset dir="${build.dir}" mode="664">
+ <exclude name="${final.name}/bin/*" />
+ <exclude name="${final.name}/contrib/*/bin/*" />
+ <exclude name="${final.name}/src/contrib/ec2/bin/*" />
+ <exclude name="${final.name}/src/contrib/ec2/bin/image/*" />
+ <include name="${final.name}/**" />
+ </tarfileset>
+ <tarfileset dir="${build.dir}" mode="755">
+ <include name="${final.name}/bin/*" />
+ <include name="${final.name}/contrib/*/bin/*" />
+ <include name="${final.name}/src/contrib/ec2/bin/*" />
+ <include name="${final.name}/src/contrib/ec2/bin/image/*" />
+ </tarfileset>
+ </param.listofitems>
+ </macro_tar>
+ </target>
+
+ <target name="bin-package" depends="compile, jar, examples, tools-jar, jar-test, ant-tasks, package-librecordio"
+ description="assembles artifacts for binary target">
+ <mkdir dir="${dist.dir}"/>
+ <mkdir dir="${dist.dir}/lib"/>
+ <mkdir dir="${dist.dir}/contrib"/>
+ <mkdir dir="${dist.dir}/bin"/>
+
+ <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true">
+ <fileset dir="${common.ivy.lib.dir}"/>
+ </copy>
+
+ <copy todir="${dist.dir}/lib" includeEmptyDirs="false">
+ <fileset dir="lib">
+ <exclude name="**/native/**"/>
+ </fileset>
+ </copy>
+
+ <exec dir="${dist.dir}" executable="sh" failonerror="true">
+ <env key="BASE_NATIVE_LIB_DIR" value="${lib.dir}/native"/>
+ <env key="BUILD_NATIVE_DIR" value="${build.dir}/native"/>
+ <env key="DIST_LIB_DIR" value="${dist.dir}/lib/native"/>
+ <arg line="${native.src.dir}/packageNativeHadoop.sh"/>
+ </exec>
+
+ <subant target="package">
+ <!--Pass down the version in case its needed again and the target
+ distribution directory so contribs know where to install to.-->
+ <property name="version" value="${version}"/>
+ <property name="dist.dir" value="${dist.dir}"/>
+ <fileset file="${contrib.dir}/build.xml"/>
+ </subant>
+
+ <copy todir="${dist.dir}/webapps">
+ <fileset dir="${build.webapps}"/>
+ </copy>
+
+ <copy todir="${dist.dir}">
+ <fileset file="${build.dir}/${final.name}-*.jar"/>
+ </copy>
+
+ <copy todir="${dist.dir}/bin">
+ <fileset dir="bin"/>
+ </copy>
+
+ <copy todir="${dist.dir}/conf">
+ <fileset dir="${conf.dir}" excludes="**/*.template"/>
+ </copy>
+
+ <copy file="ivy.xml" tofile="${dist.dir}/ivy.xml"/>
+
+ <copy todir="${dist.dir}/ivy">
+ <fileset dir="ivy"/>
+ </copy>
+
+ <copy todir="${dist.dir}">
+ <fileset dir=".">
+ <include name="*.txt" />
+ </fileset>
+ </copy>
+
+ <copy todir="${dist.dir}/c++" includeEmptyDirs="false">
+ <fileset dir="${build.dir}/c++"/>
+ </copy>
+
+ <copy todir="${dist.dir}/" file="build.xml"/>
+
+ <chmod perm="ugo+x" type="file" parallel="false">
+ <fileset dir="${dist.dir}/bin"/>
+ </chmod>
+ </target>
+
+ <target name="binary" depends="bin-package" description="Make tarball without source and documentation">
+ <macro_tar param.destfile="${build.dir}/${final.name}-bin.tar.gz">
+ <param.listofitems>
+ <tarfileset dir="${build.dir}" mode="664">
+ <exclude name="${final.name}/bin/*" />
+ <exclude name="${final.name}/src/**" />
+ <exclude name="${final.name}/docs/**" />
+ <include name="${final.name}/**" />
+ </tarfileset>
+ <tarfileset dir="${build.dir}" mode="755">
+ <include name="${final.name}/bin/*" />
+ </tarfileset>
+ </param.listofitems>
+ </macro_tar>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Perform audit activities for the release -->
+ <!-- ================================================================== -->
+ <target name="releaseaudit" depends="package,ivy-retrieve-releaseaudit" description="Release Audit activities">
+ <fail unless="rat.present" message="Failed to load class [${rat.reporting.classname}]."/>
+ <java classname="${rat.reporting.classname}" fork="true">
+ <classpath refid="releaseaudit-classpath"/>
+ <arg value="${build.dir}/${final.name}"/>
+ </java>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Clean. Delete the build files, and their directories -->
+ <!-- ================================================================== -->
+ <target name="clean" depends="clean-contrib" description="Clean. Delete the build files, and their directories">
+ <delete dir="${build.dir}"/>
+ <delete dir="${docs.src}/build"/>
+ <delete dir="${src.docs.cn}/build"/>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Clean contrib target. For now, must be called explicitly -->
+  <!-- Using subant instead of ant as a workaround for Ant bug 30569        -->
+ <!-- ================================================================== -->
+ <target name="clean-contrib">
+ <subant target="clean">
+ <fileset file="src/contrib/build.xml"/>
+ </subant>
+ </target>
+
+ <target name="test-c++-libhdfs" depends="compile-c++-libhdfs, compile-core" if="islibhdfs">
+ <delete dir="${test.libhdfs.dir}"/>
+ <mkdir dir="${test.libhdfs.dir}"/>
+ <mkdir dir="${test.libhdfs.dir}/logs"/>
+ <mkdir dir="${test.libhdfs.dir}/hdfs/name"/>
+
+ <exec dir="${build.c++.libhdfs}" executable="${make.cmd}" failonerror="true">
+ <env key="OS_NAME" value="${os.name}"/>
+ <env key="OS_ARCH" value="${os.arch}"/>
+ <env key="JVM_ARCH" value="${jvm.arch}"/>
+ <env key="LIBHDFS_BUILD_DIR" value="${build.c++.libhdfs}"/>
+ <env key="HADOOP_HOME" value="${basedir}"/>
+ <env key="HADOOP_CONF_DIR" value="${test.libhdfs.conf.dir}"/>
+ <env key="HADOOP_LOG_DIR" value="${test.libhdfs.dir}/logs"/>
+ <env key="LIBHDFS_SRC_DIR" value="${c++.libhdfs.src}"/>
+ <env key="LIBHDFS_INSTALL_DIR" value="${install.c++}/lib"/>
+ <env key="LIB_DIR" value="${common.ivy.lib.dir}"/>
+ <arg value="test"/>
+ </exec>
+ </target>
+
+<!-- ================================================================== -->
+<!-- librecordio targets. -->
+<!-- ================================================================== -->
+
+ <target name="compile-librecordio" depends="init" if="librecordio" >
+ <mkdir dir="${build.librecordio}"/>
+ <exec dir="${librecordio.src}" executable="${make.cmd}" failonerror="true">
+ <env key="XERCESCROOT" value="${xercescroot}"/>
+ <env key="LIBRECORDIO_BUILD_DIR" value="${build.librecordio}"/>
+ </exec>
+ </target>
+
+ <target name="test-librecordio" depends="compile-librecordio, compile-core" if="librecordio">
+ <delete dir="${librecordio.test.dir}"/>
+ <mkdir dir="${librecordio.test.dir}"/>
+ <exec dir="${librecordio.src}/test" executable="${make.cmd}" failonerror="true">
+ <env key="HADOOP_HOME" value="${basedir}"/>
+ <env key="XERCESCROOT" value="${xercescroot}"/>
+ <env key="LIBRECORDIO_BUILD_DIR" value="${build.librecordio}"/>
+ <env key="LIBRECORDIO_TEST_DIR" value="${librecordio.test.dir}"/>
+ <arg value="all"/>
+ </exec>
+ </target>
+
+ <target name="package-librecordio" depends="compile-librecordio" if="librecordio">
+ <mkdir dir="${dist.dir}/librecordio"/>
+ <copy todir="${dist.dir}/librecordio">
+ <fileset dir="${build.librecordio}" casesensitive="yes" followsymlinks="false">
+ <exclude name="**/tests/**"/>
+ <exclude name="*.so"/>
+ <exclude name="*.o"/>
+ </fileset>
+ </copy>
+ <chmod perm="ugo+x" type="file">
+ <fileset dir="${dist.dir}/librecordio"/>
+ </chmod>
+ </target>
+
+ <target name="create-c++-configure" depends="init" if="compile.c++">
+ <exec executable="autoreconf" dir="${c++.utils.src}" searchpath="yes"
+ failonerror="yes">
+ <arg value="-if"/>
+ </exec>
+ <exec executable="autoreconf" dir="${c++.pipes.src}" searchpath="yes"
+ failonerror="yes">
+ <arg value="-if"/>
+ </exec>
+ <exec executable="autoreconf" dir="${c++.examples.pipes.src}"
+ searchpath="yes" failonerror="yes">
+ <arg value="-if"/>
+ </exec>
+ <antcall target="create-c++-configure-libhdfs"/>
+ </target>
+
+ <target name="create-c++-configure-libhdfs" depends="check-c++-libhdfs" if="islibhdfs">
+ <exec executable="autoreconf" dir="${c++.libhdfs.src}"
+ searchpath="yes" failonerror="yes">
+ <arg value="-if"/>
+ </exec>
+ </target>
+
+ <target name="check-c++-makefiles" depends="init" if="compile.c++">
+ <condition property="need.c++.utils.makefile">
+ <not> <available file="${build.c++.utils}/Makefile"/> </not>
+ </condition>
+ <condition property="need.c++.pipes.makefile">
+ <not> <available file="${build.c++.pipes}/Makefile"/> </not>
+ </condition>
+ <condition property="need.c++.examples.pipes.makefile">
+ <not> <available file="${build.c++.examples.pipes}/Makefile"/> </not>
+ </condition>
+ </target>
+
+ <target name="check-c++-libhdfs">
+ <condition property="islibhdfs">
+ <and>
+ <isset property="compile.c++"/>
+ <isset property="libhdfs"/>
+ </and>
+ </condition>
+ </target>
+
+ <target name="check-c++-makefile-libhdfs" depends="init,check-c++-libhdfs" if="islibhdfs">
+ <condition property="need.c++.libhdfs.makefile">
+ <not> <available file="${build.c++.libhdfs}/Makefile"/> </not>
+ </condition>
+ </target>
+
+ <target name="create-c++-libhdfs-makefile" depends="check-c++-makefile-libhdfs"
+ if="need.c++.libhdfs.makefile">
+ <mkdir dir="${build.c++.libhdfs}"/>
+ <chmod file="${c++.libhdfs.src}/configure" perm="ugo+x"/>
+ <exec executable="${c++.libhdfs.src}/configure" dir="${build.c++.libhdfs}"
+ failonerror="yes">
+ <env key="ac_cv_func_malloc_0_nonnull" value="yes"/>
+ <env key="JVM_ARCH" value="${jvm.arch}"/>
+ <arg value="--prefix=${install.c++}"/>
+ </exec>
+ </target>
+
+ <target name="create-c++-utils-makefile" depends="check-c++-makefiles"
+ if="need.c++.utils.makefile">
+ <mkdir dir="${build.c++.utils}"/>
+ <exec executable="${c++.utils.src}/configure" dir="${build.c++.utils}"
+ failonerror="yes">
+ <arg value="--prefix=${install.c++}"/>
+ </exec>
+ </target>
+
+ <target name="compile-c++-utils" depends="create-c++-utils-makefile"
+ if="compile.c++">
+ <exec executable="${make.cmd}" dir="${build.c++.utils}" searchpath="yes"
+ failonerror="yes">
+ <arg value="install"/>
+ </exec>
+ </target>
+
+ <target name="create-c++-pipes-makefile" depends="check-c++-makefiles"
+ if="need.c++.pipes.makefile">
+ <mkdir dir="${build.c++.pipes}"/>
+ <exec executable="${c++.pipes.src}/configure" dir="${build.c++.pipes}"
+ failonerror="yes">
+ <arg value="--prefix=${install.c++}"/>
+ </exec>
+ </target>
+
+ <target name="compile-c++-pipes"
+ depends="create-c++-pipes-makefile,compile-c++-utils"
+ if="compile.c++">
+ <exec executable="${make.cmd}" dir="${build.c++.pipes}" searchpath="yes"
+ failonerror="yes">
+ <arg value="install"/>
+ </exec>
+ </target>
+
+ <target name="compile-c++"
+ depends="compile-c++-pipes"/>
+
+ <target name="create-c++-examples-pipes-makefile"
+ depends="check-c++-makefiles"
+ if="need.c++.examples.pipes.makefile">
+ <mkdir dir="${build.c++.examples.pipes}"/>
+ <exec executable="${c++.examples.pipes.src}/configure"
+ dir="${build.c++.examples.pipes}"
+ failonerror="yes">
+ <arg value="--prefix=${install.c++.examples}"/>
+ <arg value="--with-hadoop-utils=${install.c++}"/>
+ <arg value="--with-hadoop-pipes=${install.c++}"/>
+ </exec>
+ </target>
+
+ <target name="compile-c++-examples-pipes"
+ depends="create-c++-examples-pipes-makefile,compile-c++-pipes"
+ if="compile.c++">
+ <exec executable="${make.cmd}" dir="${build.c++.examples.pipes}" searchpath="yes"
+ failonerror="yes">
+ <arg value="install"/>
+ </exec>
+ </target>
+
+ <target name="compile-c++-examples"
+ depends="compile-c++-examples-pipes"/>
+
+ <target name="compile-c++-libhdfs" depends="create-c++-libhdfs-makefile" if="islibhdfs">
+ <exec executable="${make.cmd}" dir="${build.c++.libhdfs}" searchpath="yes"
+ failonerror="yes">
+ <env key="ac_cv_func_malloc_0_nonnull" value="yes"/>
+ <env key="JVM_ARCH" value="${jvm.arch}"/>
+ <arg value="install"/>
+ </exec>
+ </target>
+
+
+
+ <target name="compile-ant-tasks" depends="compile-core">
+ <javac
+ encoding="${build.encoding}"
+ srcdir="${anttasks.dir}"
+ includes="org/apache/hadoop/ant/**/*.java"
+ destdir="${build.anttasks}"
+ debug="${javac.debug}"
+ optimize="${javac.optimize}"
+ target="${javac.version}"
+ source="${javac.version}"
+ deprecation="${javac.deprecation}">
+ <compilerarg line="${javac.args}"/>
+ <classpath refid="classpath"/>
+ </javac>
+ </target>
+
+ <target name="ant-tasks" depends="jar, compile-ant-tasks">
+ <copy file="${anttasks.dir}/org/apache/hadoop/ant/antlib.xml"
+ todir="${build.anttasks}/org/apache/hadoop/ant"/>
+ <jar destfile="${build.dir}/${final.name}-ant.jar">
+ <fileset dir="${build.anttasks}"/>
+ </jar>
+ </target>
+
+
+
+ <target name="clover" depends="clover.setup, clover.info" description="Instrument the Unit tests using Clover. To use, specify -Dclover.home=&lt;base of clover installation&gt; -Drun.clover=true on the command line."/>
+
+<target name="clover.setup" if="clover.enabled">
+ <taskdef resource="cloverlib.xml" classpath="${clover.jar}"/>
+ <mkdir dir="${clover.db.dir}"/>
+ <clover-setup initString="${clover.db.dir}/hadoop_coverage.db">
+ <fileset dir="src" includes="core/**/* tools/**/* hdfs/**/* mapred/**/*"/>
+ </clover-setup>
+</target>
+
+<target name="clover.info" unless="clover.present">
+ <echo>
+ Clover not found. Code coverage reports disabled.
+ </echo>
+</target>
+
+<target name="clover.check">
+ <fail unless="clover.present">
+ ##################################################################
+ Clover not found.
+ Please specify -Dclover.home=&lt;base of clover installation&gt;
+ on the command line.
+ ##################################################################
+ </fail>
+</target>
+
+<target name="generate-clover-reports" depends="clover.check, clover">
+ <mkdir dir="${clover.report.dir}"/>
+ <clover-report>
+ <current outfile="${clover.report.dir}" title="${final.name}">
+ <format type="html"/>
+ </current>
+ </clover-report>
+ <clover-report>
+ <current outfile="${clover.report.dir}/clover.xml" title="${final.name}">
+ <format type="xml"/>
+ </current>
+ </clover-report>
+</target>
+
+<target name="findbugs.check" depends="check-for-findbugs" unless="findbugs.present">
+ <fail message="'findbugs.home' is not defined. Please pass -Dfindbugs.home=&lt;base of Findbugs installation&gt; to Ant on the command-line." />
+</target>
+
+<target name="patch.check" unless="patch.file">
+ <fail message="'patch.file' is not defined. Please pass -Dpatch.file=&lt;location of patch file&gt; to Ant on the command-line." />
+</target>
+
+<target name="test-patch" depends="patch.check,findbugs.check,forrest.check">
+ <exec executable="bash" failonerror="true">
+ <arg value="${basedir}/src/test/bin/test-patch.sh"/>
+ <arg value="DEVELOPER"/>
+ <arg value="${patch.file}"/>
+ <arg value="${scratch.dir}"/>
+ <arg value="${svn.cmd}"/>
+ <arg value="${grep.cmd}"/>
+ <arg value="${patch.cmd}"/>
+ <arg value="${findbugs.home}"/>
+ <arg value="${forrest.home}"/>
+ <arg value="${basedir}"/>
+ <arg value="${java5.home}"/>
+ </exec>
+</target>
+
+<target name="hudson-test-patch" depends="findbugs.check,forrest.check">
+ <exec executable="bash" failonerror="true">
+ <arg value="${basedir}/src/test/bin/test-patch.sh"/>
+ <arg value="HUDSON"/>
+ <arg value="${scratch.dir}"/>
+ <arg value="${support.dir}"/>
+ <arg value="${ps.cmd}"/>
+ <arg value="${wget.cmd}"/>
+ <arg value="${jiracli.cmd}"/>
+ <arg value="${svn.cmd}"/>
+ <arg value="${grep.cmd}"/>
+ <arg value="${patch.cmd}"/>
+ <arg value="${findbugs.home}"/>
+ <arg value="${forrest.home}"/>
+ <arg value="${eclipse.home}"/>
+ <arg value="${python.home}"/>
+ <arg value="${basedir}"/>
+ <arg value="${trigger.url}"/>
+ <arg value="${jira.passwd}"/>
+ <arg value="${java5.home}"/>
+ </exec>
+</target>
+
+ <target name="eclipse-files" depends="init"
+ description="Generate files for Eclipse">
+ <pathconvert property="eclipse.project">
+ <path path="${basedir}"/>
+ <regexpmapper from="^.*/([^/]+)$$" to="\1" handledirsep="yes"/>
+ </pathconvert>
+ <copy todir="." overwrite="true">
+ <fileset dir=".eclipse.templates">
+ <exclude name="**/README.txt"/>
+ </fileset>
+ <filterset>
+ <filter token="PROJECT" value="${eclipse.project}"/>
+ </filterset>
+ </copy>
+ </target>
+
+ <target name="ivy-init-dirs">
+ <mkdir dir="${build.ivy.dir}" />
+ <mkdir dir="${build.ivy.lib.dir}" />
+ <mkdir dir="${build.ivy.report.dir}" />
+ <mkdir dir="${build.ivy.maven.dir}" />
+ </target>
+
+ <target name="ivy-probe-antlib" >
+ <condition property="ivy.found">
+ <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/>
+ </condition>
+ </target>
+
+ <target name="ivy-download" description="To download ivy" unless="offline">
+ <get src="${ivy_repo_url}" dest="${ivy.jar}" usetimestamp="true"/>
+ </target>
+
+ <!--
+    To avoid Ivy leaking things across big projects, always load Ivy in the same classloader.
+    Note that loading is skipped if Ivy is already present, just to make sure all is well.
+ -->
+ <target name="ivy-init-antlib" depends="ivy-download,ivy-init-dirs,ivy-probe-antlib" unless="ivy.found">
+ <typedef uri="antlib:org.apache.ivy.ant" onerror="fail"
+ loaderRef="ivyLoader">
+ <classpath>
+ <pathelement location="${ivy.jar}"/>
+ </classpath>
+ </typedef>
+ <fail >
+ <condition >
+ <not>
+ <typefound uri="antlib:org.apache.ivy.ant" name="cleancache"/>
+ </not>
+ </condition>
+      You need Apache Ivy 2.0 or later (from http://ant.apache.org/);
+      it could not be loaded from ${ivy_repo_url}.
+ </fail>
+ </target>
+
+
+ <target name="ivy-init" depends="ivy-init-antlib" >
+
+    <!--Configure Ivy by reading in the settings file.
+      If a settings file has already been read into this settings ID, that one takes priority.
+    -->
+ <ivy:configure settingsid="${ant.project.name}.ivy.settings" file="${ivysettings.xml}" override='false'/>
+ </target>
+
+ <target name="ivy-resolve" depends="ivy-init">
+ <ivy:resolve settingsRef="${ant.project.name}.ivy.settings"/>
+ </target>
+
+ <target name="ivy-resolve-javadoc" depends="ivy-init">
+ <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="javadoc"/>
+ </target>
+
+ <target name="ivy-resolve-releaseaudit" depends="ivy-init">
+ <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="releaseaudit"/>
+ </target>
+
+ <target name="ivy-resolve-test" depends="ivy-init">
+ <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="test" />
+ </target>
+
+ <target name="ivy-resolve-common" depends="ivy-init">
+ <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="common" />
+ </target>
+
+ <target name="ivy-resolve-jdiff" depends="ivy-init">
+ <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="jdiff" />
+ </target>
+
+ <target name="ivy-resolve-checkstyle" depends="ivy-init">
+ <ivy:resolve settingsRef="${ant.project.name}.ivy.settings" conf="checkstyle"/>
+ </target>
+
+ <target name="ivy-retrieve" depends="ivy-resolve"
+ description="Retrieve Ivy-managed artifacts">
+ <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+ pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/>
+ </target>
+
+ <target name="ivy-retrieve-checkstyle" depends="ivy-resolve-checkstyle"
+ description="Retrieve Ivy-managed artifacts for the checkstyle configurations">
+ <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+ pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/>
+ <ivy:cachepath pathid="checkstyle-classpath" conf="checkstyle"/>
+ </target>
+
+ <target name="ivy-retrieve-jdiff" depends="ivy-resolve-jdiff"
+ description="Retrieve Ivy-managed artifacts for the javadoc configurations">
+ <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+ pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/>
+ <ivy:cachepath pathid="jdiff-classpath" conf="jdiff"/>
+ </target>
+
+ <target name="ivy-retrieve-javadoc" depends="ivy-resolve-javadoc"
+ description="Retrieve Ivy-managed artifacts for the javadoc configurations">
+ <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+ pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/>
+ <ivy:cachepath pathid="javadoc-classpath" conf="javadoc"/>
+ </target>
+
+ <target name="ivy-retrieve-test" depends="ivy-resolve-test"
+ description="Retrieve Ivy-managed artifacts for the test configurations">
+ <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+ pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/>
+ <ivy:cachepath pathid="test.classpath" conf="test"/>
+ </target>
+
+ <target name="ivy-retrieve-common" depends="ivy-resolve-common"
+ description="Retrieve Ivy-managed artifacts for the compile configurations">
+ <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+ pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}"/>
+ <ivy:cachepath pathid="ivy-common.classpath" conf="common"/>
+ </target>
+
+ <target name="ivy-retrieve-releaseaudit" depends="ivy-resolve-releaseaudit"
+ description="Retrieve Ivy-managed artifacts for the compile configurations">
+ <ivy:retrieve settingsRef="${ant.project.name}.ivy.settings"
+ pattern="${build.ivy.lib.dir}/${ivy.artifact.retrieve.pattern}" />
+ <ivy:cachepath pathid="releaseaudit-classpath" conf="releaseaudit"/>
+ <available classname="${rat.reporting.classname}"
+ classpathref="releaseaudit-classpath" property="rat.present" value="true"/>
+ </target>
+
+ <target name="ivy-report" depends="ivy-resolve-releaseaudit"
+ description="Generate">
+ <ivy:report todir="${build.ivy.report.dir}" settingsRef="${ant.project.name}.ivy.settings"/>
+ <echo>
+        Reports generated: ${build.ivy.report.dir}
+ </echo>
+ </target>
+
+ <target name="assert-hadoop-jar-exists" depends="ivy-init">
+ <fail>
+ <condition >
+ <not>
+ <available file="${hadoop.jar}" />
+ </not>
+ </condition>
+ Not found: ${hadoop.jar}
+ Please run the target "jar" in the main build file
+ </fail>
+
+ </target>
+
+ <target name="ready-to-publish" depends="jar,assert-hadoop-jar-exists,ivy-resolve"/>
+
+ <target name="ivy-publish-local" depends="ready-to-publish,ivy-resolve">
+ <ivy:publish
+ settingsRef="${ant.project.name}.ivy.settings"
+ resolver="local"
+ pubrevision="${hadoop.version}"
+ overwrite="true"
+ artifactspattern="${build.dir}/${ivy.publish.pattern}" />
+ </target>
+
+
+  <!-- this is here out of curiosity, to see how well the makepom task works.
+  Answer: it depends on whether you want transitive dependencies excluded or not.
+  -->
+ <target name="makepom" depends="ivy-resolve">
+ <ivy:makepom settingsRef="${ant.project.name}.ivy.settings"
+ ivyfile="ivy.xml"
+ pomfile="${build.ivy.maven.dir}/generated.pom">
+ <ivy:mapping conf="default" scope="default"/>
+ <ivy:mapping conf="master" scope="master"/>
+ <ivy:mapping conf="runtime" scope="runtime"/>
+ </ivy:makepom>
+ </target>
+
+
+ <target name="copy-jar-to-maven" depends="ready-to-publish">
+ <copy file="${hadoop.jar}"
+ tofile="${build.ivy.maven.jar}"/>
+ <checksum file="${build.ivy.maven.jar}" algorithm="md5"/>
+ </target>
+
+ <target name="copypom" depends="ivy-init-dirs">
+
+ <presetdef name="expandingcopy" >
+ <copy overwrite="true">
+ <filterchain>
+ <expandproperties/>
+ </filterchain>
+ </copy>
+ </presetdef>
+
+ <expandingcopy file="ivy/hadoop-core.pom"
+ tofile="${build.ivy.maven.pom}"/>
+ <checksum file="${build.ivy.maven.pom}" algorithm="md5"/>
+ </target>
+
+ <target name="maven-artifacts" depends="copy-jar-to-maven,copypom" />
+
+ <target name="published" depends="ivy-publish-local,maven-artifacts">
+
+ </target>
+
+</project>
diff --git a/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/Pipes.hh b/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/Pipes.hh
new file mode 100644
index 0000000000..9a785d966a
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/Pipes.hh
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HADOOP_PIPES_HH
+#define HADOOP_PIPES_HH
+
+#ifdef SWIG
+%module (directors="1") HadoopPipes
+%include "std_string.i"
+%feature("director") Mapper;
+%feature("director") Reducer;
+%feature("director") Partitioner;
+%feature("director") RecordReader;
+%feature("director") RecordWriter;
+%feature("director") Factory;
+#else
+#include <string>
+#endif
+
+namespace HadoopPipes {
+
+/**
+ * These classes define the interface between application code and the
+ * C++ (foreign code) side of Hadoop Map/Reduce.
+ */
+
+/**
+ * A JobConf defines the properties for a job.
+ */
+class JobConf {
+public:
+ virtual bool hasKey(const std::string& key) const = 0;
+ virtual const std::string& get(const std::string& key) const = 0;
+ virtual int getInt(const std::string& key) const = 0;
+ virtual float getFloat(const std::string& key) const = 0;
+  virtual bool getBoolean(const std::string& key) const = 0;
+ virtual ~JobConf() {}
+};
+
+/**
+ * Task context provides the information about the task and job.
+ */
+class TaskContext {
+public:
+ /**
+ * Counter to keep track of a property and its value.
+ */
+ class Counter {
+ private:
+ int id;
+ public:
+ Counter(int counterId) : id(counterId) {}
+ Counter(const Counter& counter) : id(counter.id) {}
+
+ int getId() const { return id; }
+ };
+
+ /**
+ * Get the JobConf for the current task.
+ */
+ virtual const JobConf* getJobConf() = 0;
+
+ /**
+ * Get the current key.
+ * @return the current key
+ */
+ virtual const std::string& getInputKey() = 0;
+
+ /**
+ * Get the current value.
+ * @return the current value
+ */
+ virtual const std::string& getInputValue() = 0;
+
+ /**
+   * Generate an output record.
+ */
+ virtual void emit(const std::string& key, const std::string& value) = 0;
+
+ /**
+ * Mark your task as having made progress without changing the status
+ * message.
+ */
+ virtual void progress() = 0;
+
+ /**
+ * Set the status message and call progress.
+ */
+ virtual void setStatus(const std::string& status) = 0;
+
+ /**
+ * Register a counter with the given group and name.
+ */
+ virtual Counter*
+ getCounter(const std::string& group, const std::string& name) = 0;
+
+ /**
+ * Increment the value of the counter with the given amount.
+ */
+ virtual void incrementCounter(const Counter* counter, uint64_t amount) = 0;
+
+ virtual ~TaskContext() {}
+};
+
+class MapContext: public TaskContext {
+public:
+
+ /**
+ * Access the InputSplit of the mapper.
+ */
+ virtual const std::string& getInputSplit() = 0;
+
+ /**
+ * Get the name of the key class of the input to this task.
+ */
+ virtual const std::string& getInputKeyClass() = 0;
+
+ /**
+ * Get the name of the value class of the input to this task.
+ */
+ virtual const std::string& getInputValueClass() = 0;
+
+};
+
+class ReduceContext: public TaskContext {
+public:
+ /**
+ * Advance to the next value.
+ */
+ virtual bool nextValue() = 0;
+};
+
+class Closable {
+public:
+ virtual void close() {}
+ virtual ~Closable() {}
+};
+
+/**
+ * The application's mapper class to do map.
+ */
+class Mapper: public Closable {
+public:
+ virtual void map(MapContext& context) = 0;
+};
+
+/**
+ * The application's reducer class to do reduce.
+ */
+class Reducer: public Closable {
+public:
+ virtual void reduce(ReduceContext& context) = 0;
+};
+
+/**
+ * User code to decide where each key should be sent.
+ */
+class Partitioner {
+public:
+ virtual int partition(const std::string& key, int numOfReduces) = 0;
+ virtual ~Partitioner() {}
+};
+
+/**
+ * Applications that want to read the input for the map function directly
+ * can define RecordReaders in C++.
+ */
+class RecordReader: public Closable {
+public:
+ virtual bool next(std::string& key, std::string& value) = 0;
+
+ /**
+ * The progress of the record reader through the split as a value between
+ * 0.0 and 1.0.
+ */
+ virtual float getProgress() = 0;
+};
+
+/**
+ * An object to write key/value pairs as they are emitted from the reduce.
+ */
+class RecordWriter: public Closable {
+public:
+ virtual void emit(const std::string& key,
+ const std::string& value) = 0;
+};
+
+/**
+ * A factory to create the necessary application objects.
+ */
+class Factory {
+public:
+ virtual Mapper* createMapper(MapContext& context) const = 0;
+ virtual Reducer* createReducer(ReduceContext& context) const = 0;
+
+ /**
+ * Create a combiner, if this application has one.
+ * @return the new combiner or NULL, if one is not needed
+ */
+ virtual Reducer* createCombiner(MapContext& context) const {
+ return NULL;
+ }
+
+ /**
+ * Create an application partitioner object.
+ * @return the new partitioner or NULL, if the default partitioner should be
+ * used.
+ */
+ virtual Partitioner* createPartitioner(MapContext& context) const {
+ return NULL;
+ }
+
+ /**
+ * Create an application record reader.
+ * @return the new RecordReader or NULL, if the Java RecordReader should be
+ * used.
+ */
+ virtual RecordReader* createRecordReader(MapContext& context) const {
+ return NULL;
+ }
+
+ /**
+ * Create an application record writer.
+ * @return the new RecordWriter or NULL, if the Java RecordWriter should be
+ * used.
+ */
+ virtual RecordWriter* createRecordWriter(ReduceContext& context) const {
+ return NULL;
+ }
+
+ virtual ~Factory() {}
+};
+
+/**
+ * Run the assigned task in the framework.
+ * The user's main function should construct a Factory (for example via
+ * TemplateFactory) and pass it to this call.
+ * @return true, if the task succeeded.
+ */
+bool runTask(const Factory& factory);
+
+}
+
+#endif
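
The header above, together with TemplateFactory.hh later in this commit, is all a Pipes application needs: subclass Mapper and Reducer, then hand them to runTask. As a minimal sketch (the WordCountMap/WordCountReduce names are illustrative, not part of this commit):

    #include <string>
    #include <vector>
    #include "hadoop/Pipes.hh"
    #include "hadoop/TemplateFactory.hh"
    #include "hadoop/StringUtils.hh"

    class WordCountMap : public HadoopPipes::Mapper {
    public:
      WordCountMap(HadoopPipes::TaskContext&) {}
      void map(HadoopPipes::MapContext& context) {
        // Emit <word, "1"> for every whitespace-separated word in the value.
        std::vector<std::string> words =
            HadoopUtils::splitString(context.getInputValue(), " ");
        for (size_t i = 0; i < words.size(); ++i) {
          context.emit(words[i], "1");
        }
      }
    };

    class WordCountReduce : public HadoopPipes::Reducer {
    public:
      WordCountReduce(HadoopPipes::TaskContext&) {}
      void reduce(HadoopPipes::ReduceContext& context) {
        // Sum all counts seen for the current key.
        int sum = 0;
        while (context.nextValue()) {
          sum += HadoopUtils::toInt(context.getInputValue());
        }
        context.emit(context.getInputKey(), HadoopUtils::toString(sum));
      }
    };

    int main(int argc, char* argv[]) {
      // runTask drives whichever map or reduce loop the framework assigns.
      return HadoopPipes::runTask(
          HadoopPipes::TemplateFactory<WordCountMap, WordCountReduce>());
    }

The resulting binary would link against the libhadooppipes.a and libhadooputils.a archives shipped under c++/<platform>/lib alongside these headers.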
diff --git a/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/SerialUtils.hh b/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/SerialUtils.hh
new file mode 100644
index 0000000000..16cbab65b2
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/SerialUtils.hh
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HADOOP_SERIAL_UTILS_HH
+#define HADOOP_SERIAL_UTILS_HH
+
+#include <string>
+
+namespace HadoopUtils {
+
+ /**
+ * A simple exception class that records a message for the user.
+ */
+ class Error {
+ private:
+ std::string error;
+ public:
+
+ /**
+ * Create an error object with the given message.
+ */
+ Error(const std::string& msg);
+
+ /**
+ * Construct an error object with the given message that was created on
+     * the given file, line, and function.
+ */
+ Error(const std::string& msg,
+ const std::string& file, int line, const std::string& function);
+
+ /**
+ * Get the error message.
+ */
+ const std::string& getMessage() const;
+ };
+
+ /**
+ * Check to make sure that the condition is true, and throw an exception
+ * if it is not. The exception will contain the message and a description
+ * of the source location.
+ */
+ #define HADOOP_ASSERT(CONDITION, MESSAGE) \
+ { \
+ if (!(CONDITION)) { \
+ throw HadoopUtils::Error((MESSAGE), __FILE__, __LINE__, \
+ __PRETTY_FUNCTION__); \
+ } \
+ }
+
+ /**
+ * An interface for an input stream.
+ */
+ class InStream {
+ public:
+ /**
+ * Reads len bytes from the stream into the buffer.
+ * @param buf the buffer to read into
+ * @param buflen the length of the buffer
+ * @throws Error if there are problems reading
+ */
+ virtual void read(void *buf, size_t len) = 0;
+ virtual ~InStream() {}
+ };
+
+ /**
+ * An interface for an output stream.
+ */
+ class OutStream {
+ public:
+ /**
+ * Write the given buffer to the stream.
+ * @param buf the data to write
+ * @param len the number of bytes to write
+ * @throws Error if there are problems writing
+ */
+ virtual void write(const void *buf, size_t len) = 0;
+ /**
+ * Flush the data to the underlying store.
+ */
+ virtual void flush() = 0;
+ virtual ~OutStream() {}
+ };
+
+ /**
+ * A class to read a file as a stream.
+ */
+ class FileInStream : public InStream {
+ public:
+ FileInStream();
+ bool open(const std::string& name);
+ bool open(FILE* file);
+ void read(void *buf, size_t buflen);
+ bool skip(size_t nbytes);
+ bool close();
+ virtual ~FileInStream();
+ private:
+ /**
+     * The file to read from.
+ */
+ FILE *mFile;
+ /**
+     * Is this class responsible for closing the FILE*?
+ */
+ bool isOwned;
+ };
+
+ /**
+ * A class to write a stream to a file.
+ */
+ class FileOutStream: public OutStream {
+ public:
+
+ /**
+ * Create a stream that isn't bound to anything.
+ */
+ FileOutStream();
+
+ /**
+ * Create the given file, potentially overwriting an existing file.
+ */
+ bool open(const std::string& name, bool overwrite);
+ bool open(FILE* file);
+ void write(const void* buf, size_t len);
+ bool advance(size_t nbytes);
+ void flush();
+ bool close();
+ virtual ~FileOutStream();
+ private:
+ FILE *mFile;
+ bool isOwned;
+ };
+
+ /**
+ * A stream that reads from a string.
+ */
+ class StringInStream: public InStream {
+ public:
+ StringInStream(const std::string& str);
+ virtual void read(void *buf, size_t buflen);
+ private:
+ const std::string& buffer;
+ std::string::const_iterator itr;
+ };
+
+ void serializeInt(int32_t t, OutStream& stream);
+ int32_t deserializeInt(InStream& stream);
+ void serializeLong(int64_t t, OutStream& stream);
+ int64_t deserializeLong(InStream& stream);
+ void serializeFloat(float t, OutStream& stream);
+ float deserializeFloat(InStream& stream);
+ void serializeString(const std::string& t, OutStream& stream);
+ void deserializeString(std::string& t, InStream& stream);
+}
+
+#endif
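
As a hedged sketch of how these declarations compose (the file name demo.bin and the values are arbitrary, not anything this commit references): write with FileOutStream, read back in the same order with FileInStream.

    #include <stdint.h>
    #include <iostream>
    #include <string>
    #include "hadoop/SerialUtils.hh"

    int main() {
      // Write an int and a string using the serialize* helpers.
      HadoopUtils::FileOutStream out;
      out.open("demo.bin", true);               // true = overwrite if present
      HadoopUtils::serializeInt(42, out);
      HadoopUtils::serializeString("hello", out);
      out.close();

      // Read them back in the same order they were written.
      HadoopUtils::FileInStream in;
      in.open("demo.bin");
      int32_t n = HadoopUtils::deserializeInt(in);
      std::string s;
      HadoopUtils::deserializeString(s, in);
      in.close();

      std::cout << n << " " << s << std::endl;  // expected: 42 hello
      return 0;
    }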
diff --git a/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/StringUtils.hh b/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/StringUtils.hh
new file mode 100644
index 0000000000..4720172725
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/StringUtils.hh
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HADOOP_STRING_UTILS_HH
+#define HADOOP_STRING_UTILS_HH
+
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+namespace HadoopUtils {
+
+ /**
+ * Convert an integer to a string.
+ */
+ std::string toString(int32_t x);
+
+ /**
+ * Convert a string to an integer.
+ * @throws Error if the string is not a valid integer
+ */
+ int32_t toInt(const std::string& val);
+
+ /**
+ * Convert the string to a float.
+ * @throws Error if the string is not a valid float
+ */
+ float toFloat(const std::string& val);
+
+ /**
+ * Convert the string to a boolean.
+ * @throws Error if the string is not a valid boolean value
+ */
+ bool toBool(const std::string& val);
+
+ /**
+ * Get the current time in the number of milliseconds since 1970.
+ */
+ uint64_t getCurrentMillis();
+
+ /**
+ * Split a string into "words". Multiple deliminators are treated as a single
+ * word break, so no zero-length words are returned.
+ * @param str the string to split
+ * @param separator a list of characters that divide words
+ */
+ std::vector<std::string> splitString(const std::string& str,
+ const char* separator);
+
+ /**
+   * Quote a string to escape "\", non-printable characters, and the
+   * given delimiter characters.
+ * @param str the string to quote
+ * @param deliminators the set of characters to always quote
+ */
+ std::string quoteString(const std::string& str,
+ const char* deliminators);
+
+ /**
+ * Unquote the given string to return the original string.
+ * @param str the string to unquote
+ */
+ std::string unquoteString(const std::string& str);
+
+}
+
+#endif
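
A small usage sketch under the same caveat (the input strings are arbitrary); it leans on the documented guarantee that repeated separators never yield zero-length words.

    #include <iostream>
    #include <string>
    #include <vector>
    #include "hadoop/StringUtils.hh"

    int main() {
      // Two spaces between "tar" and "binary" still yield exactly three words.
      std::vector<std::string> words =
          HadoopUtils::splitString("ant tar  binary", " ");
      for (size_t i = 0; i < words.size(); ++i) {
        std::cout << i << ": " << words[i] << "\n";
      }
      // Round-trip an integer through the string conversions.
      std::cout << HadoopUtils::toString(HadoopUtils::toInt("123") + 1) << "\n";
      return 0;
    }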
diff --git a/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/TemplateFactory.hh b/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/TemplateFactory.hh
new file mode 100644
index 0000000000..22e10ae56f
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-amd64-64/include/hadoop/TemplateFactory.hh
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HADOOP_PIPES_TEMPLATE_FACTORY_HH
+#define HADOOP_PIPES_TEMPLATE_FACTORY_HH
+
+namespace HadoopPipes {
+
+ template <class mapper, class reducer>
+ class TemplateFactory2: public Factory {
+ public:
+ Mapper* createMapper(MapContext& context) const {
+ return new mapper(context);
+ }
+ Reducer* createReducer(ReduceContext& context) const {
+ return new reducer(context);
+ }
+ };
+
+ template <class mapper, class reducer, class partitioner>
+ class TemplateFactory3: public TemplateFactory2<mapper,reducer> {
+ public:
+ Partitioner* createPartitioner(MapContext& context) const {
+ return new partitioner(context);
+ }
+ };
+
+ template <class mapper, class reducer>
+ class TemplateFactory3<mapper, reducer, void>
+ : public TemplateFactory2<mapper,reducer> {
+ };
+
+ template <class mapper, class reducer, class partitioner, class combiner>
+ class TemplateFactory4
+ : public TemplateFactory3<mapper,reducer,partitioner>{
+ public:
+ Reducer* createCombiner(MapContext& context) const {
+ return new combiner(context);
+ }
+ };
+
+ template <class mapper, class reducer, class partitioner>
+ class TemplateFactory4<mapper,reducer,partitioner,void>
+ : public TemplateFactory3<mapper,reducer,partitioner>{
+ };
+
+ template <class mapper, class reducer, class partitioner,
+ class combiner, class recordReader>
+ class TemplateFactory5
+ : public TemplateFactory4<mapper,reducer,partitioner,combiner>{
+ public:
+ RecordReader* createRecordReader(MapContext& context) const {
+ return new recordReader(context);
+ }
+ };
+
+ template <class mapper, class reducer, class partitioner,class combiner>
+ class TemplateFactory5<mapper,reducer,partitioner,combiner,void>
+ : public TemplateFactory4<mapper,reducer,partitioner,combiner>{
+ };
+
+ template <class mapper, class reducer, class partitioner=void,
+ class combiner=void, class recordReader=void,
+ class recordWriter=void>
+ class TemplateFactory
+ : public TemplateFactory5<mapper,reducer,partitioner,combiner,recordReader>{
+ public:
+ RecordWriter* createRecordWriter(ReduceContext& context) const {
+ return new recordWriter(context);
+ }
+ };
+
+ template <class mapper, class reducer, class partitioner,
+ class combiner, class recordReader>
+ class TemplateFactory<mapper, reducer, partitioner, combiner, recordReader,
+ void>
+ : public TemplateFactory5<mapper,reducer,partitioner,combiner,recordReader>{
+ };
+
+}
+
+#endif
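
The chain of partial specializations above means any trailing template argument left as void falls through to the NULL-returning defaults in Factory, so the framework keeps its Java-side component. A self-contained sketch (all class names hypothetical) that overrides only the partitioner:

    #include <string>
    #include "hadoop/Pipes.hh"
    #include "hadoop/TemplateFactory.hh"

    class PassMapper : public HadoopPipes::Mapper {
    public:
      PassMapper(HadoopPipes::TaskContext&) {}
      void map(HadoopPipes::MapContext& context) {
        context.emit(context.getInputKey(), context.getInputValue());
      }
    };

    class PassReducer : public HadoopPipes::Reducer {
    public:
      PassReducer(HadoopPipes::TaskContext&) {}
      void reduce(HadoopPipes::ReduceContext& context) {
        while (context.nextValue()) {
          context.emit(context.getInputKey(), context.getInputValue());
        }
      }
    };

    class FirstCharPartitioner : public HadoopPipes::Partitioner {
    public:
      FirstCharPartitioner(HadoopPipes::MapContext&) {}
      int partition(const std::string& key, int numOfReduces) {
        // Any deterministic key -> [0, numOfReduces) mapping will do.
        return key.empty() ? 0 : (unsigned char)key[0] % numOfReduces;
      }
    };

    int main() {
      // combiner, recordReader and recordWriter stay void, so createCombiner,
      // createRecordReader and createRecordWriter return NULL and the Java
      // defaults are used.
      return HadoopPipes::runTask(
          HadoopPipes::TemplateFactory<PassMapper, PassReducer,
                                       FirstCharPartitioner>());
    }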
diff --git a/lib/hadoop-0.20.0/c++/Linux-amd64-64/lib/libhadooppipes.a b/lib/hadoop-0.20.0/c++/Linux-amd64-64/lib/libhadooppipes.a
new file mode 100644
index 0000000000..be303140cb
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-amd64-64/lib/libhadooppipes.a
Binary files differ
diff --git a/lib/hadoop-0.20.0/c++/Linux-amd64-64/lib/libhadooputils.a b/lib/hadoop-0.20.0/c++/Linux-amd64-64/lib/libhadooputils.a
new file mode 100644
index 0000000000..8a0aded98e
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-amd64-64/lib/libhadooputils.a
Binary files differ
diff --git a/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/Pipes.hh b/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/Pipes.hh
new file mode 100644
index 0000000000..9a785d966a
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/Pipes.hh
@@ -0,0 +1,258 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HADOOP_PIPES_HH
+#define HADOOP_PIPES_HH
+
+#ifdef SWIG
+%module (directors="1") HadoopPipes
+%include "std_string.i"
+%feature("director") Mapper;
+%feature("director") Reducer;
+%feature("director") Partitioner;
+%feature("director") RecordReader;
+%feature("director") RecordWriter;
+%feature("director") Factory;
+#else
+#include <string>
+#endif
+
+namespace HadoopPipes {
+
+/**
+ * These classes define the interface between application code and the
+ * C++ (foreign code) side of Hadoop Map/Reduce.
+ */
+
+/**
+ * A JobConf defines the properties for a job.
+ */
+class JobConf {
+public:
+ virtual bool hasKey(const std::string& key) const = 0;
+ virtual const std::string& get(const std::string& key) const = 0;
+ virtual int getInt(const std::string& key) const = 0;
+ virtual float getFloat(const std::string& key) const = 0;
+  virtual bool getBoolean(const std::string& key) const = 0;
+ virtual ~JobConf() {}
+};
+
+/**
+ * Task context provides the information about the task and job.
+ */
+class TaskContext {
+public:
+ /**
+ * Counter to keep track of a property and its value.
+ */
+ class Counter {
+ private:
+ int id;
+ public:
+ Counter(int counterId) : id(counterId) {}
+ Counter(const Counter& counter) : id(counter.id) {}
+
+ int getId() const { return id; }
+ };
+
+ /**
+ * Get the JobConf for the current task.
+ */
+ virtual const JobConf* getJobConf() = 0;
+
+ /**
+ * Get the current key.
+ * @return the current key
+ */
+ virtual const std::string& getInputKey() = 0;
+
+ /**
+ * Get the current value.
+ * @return the current value
+ */
+ virtual const std::string& getInputValue() = 0;
+
+ /**
+   * Generate an output record.
+ */
+ virtual void emit(const std::string& key, const std::string& value) = 0;
+
+ /**
+ * Mark your task as having made progress without changing the status
+ * message.
+ */
+ virtual void progress() = 0;
+
+ /**
+ * Set the status message and call progress.
+ */
+ virtual void setStatus(const std::string& status) = 0;
+
+ /**
+ * Register a counter with the given group and name.
+ */
+ virtual Counter*
+ getCounter(const std::string& group, const std::string& name) = 0;
+
+ /**
+ * Increment the value of the counter with the given amount.
+ */
+ virtual void incrementCounter(const Counter* counter, uint64_t amount) = 0;
+
+ virtual ~TaskContext() {}
+};
+
+class MapContext: public TaskContext {
+public:
+
+ /**
+ * Access the InputSplit of the mapper.
+ */
+ virtual const std::string& getInputSplit() = 0;
+
+ /**
+ * Get the name of the key class of the input to this task.
+ */
+ virtual const std::string& getInputKeyClass() = 0;
+
+ /**
+ * Get the name of the value class of the input to this task.
+ */
+ virtual const std::string& getInputValueClass() = 0;
+
+};
+
+class ReduceContext: public TaskContext {
+public:
+ /**
+ * Advance to the next value.
+ */
+ virtual bool nextValue() = 0;
+};
+
+class Closable {
+public:
+ virtual void close() {}
+ virtual ~Closable() {}
+};
+
+/**
+ * The application's mapper class to do map.
+ */
+class Mapper: public Closable {
+public:
+ virtual void map(MapContext& context) = 0;
+};
+
+/**
+ * The application's reducer class to do reduce.
+ */
+class Reducer: public Closable {
+public:
+ virtual void reduce(ReduceContext& context) = 0;
+};
+
+/**
+ * User code to decide where each key should be sent.
+ */
+class Partitioner {
+public:
+ virtual int partition(const std::string& key, int numOfReduces) = 0;
+ virtual ~Partitioner() {}
+};
+
+/**
+ * Applications that want to read the input for the map function directly
+ * can define RecordReaders in C++.
+ */
+class RecordReader: public Closable {
+public:
+ virtual bool next(std::string& key, std::string& value) = 0;
+
+ /**
+ * The progress of the record reader through the split as a value between
+ * 0.0 and 1.0.
+ */
+ virtual float getProgress() = 0;
+};
+
+/**
+ * An object to write key/value pairs as they are emitted from the reduce.
+ */
+class RecordWriter: public Closable {
+public:
+ virtual void emit(const std::string& key,
+ const std::string& value) = 0;
+};
+
+/**
+ * A factory to create the necessary application objects.
+ */
+class Factory {
+public:
+ virtual Mapper* createMapper(MapContext& context) const = 0;
+ virtual Reducer* createReducer(ReduceContext& context) const = 0;
+
+ /**
+ * Create a combiner, if this application has one.
+ * @return the new combiner or NULL, if one is not needed
+ */
+ virtual Reducer* createCombiner(MapContext& context) const {
+ return NULL;
+ }
+
+ /**
+ * Create an application partitioner object.
+ * @return the new partitioner or NULL, if the default partitioner should be
+ * used.
+ */
+ virtual Partitioner* createPartitioner(MapContext& context) const {
+ return NULL;
+ }
+
+ /**
+ * Create an application record reader.
+ * @return the new RecordReader or NULL, if the Java RecordReader should be
+ * used.
+ */
+ virtual RecordReader* createRecordReader(MapContext& context) const {
+ return NULL;
+ }
+
+ /**
+ * Create an application record writer.
+ * @return the new RecordWriter or NULL, if the Java RecordWriter should be
+ * used.
+ */
+ virtual RecordWriter* createRecordWriter(ReduceContext& context) const {
+ return NULL;
+ }
+
+ virtual ~Factory() {}
+};
+
+/**
+ * Run the assigned task in the framework.
+ * The user's main function should construct a Factory (for example via
+ * TemplateFactory) and pass it to this call.
+ * @return true, if the task succeeded.
+ */
+bool runTask(const Factory& factory);
+
+}
+
+#endif
diff --git a/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/SerialUtils.hh b/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/SerialUtils.hh
new file mode 100644
index 0000000000..16cbab65b2
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/SerialUtils.hh
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HADOOP_SERIAL_UTILS_HH
+#define HADOOP_SERIAL_UTILS_HH
+
+#include <string>
+
+namespace HadoopUtils {
+
+ /**
+ * A simple exception class that records a message for the user.
+ */
+ class Error {
+ private:
+ std::string error;
+ public:
+
+ /**
+ * Create an error object with the given message.
+ */
+ Error(const std::string& msg);
+
+ /**
+ * Construct an error object with the given message that was created on
+     * the given file, line, and function.
+ */
+ Error(const std::string& msg,
+ const std::string& file, int line, const std::string& function);
+
+ /**
+ * Get the error message.
+ */
+ const std::string& getMessage() const;
+ };
+
+ /**
+ * Check to make sure that the condition is true, and throw an exception
+ * if it is not. The exception will contain the message and a description
+ * of the source location.
+ */
+ #define HADOOP_ASSERT(CONDITION, MESSAGE) \
+ { \
+ if (!(CONDITION)) { \
+ throw HadoopUtils::Error((MESSAGE), __FILE__, __LINE__, \
+ __PRETTY_FUNCTION__); \
+ } \
+ }
+
+ /**
+ * An interface for an input stream.
+ */
+ class InStream {
+ public:
+ /**
+ * Reads len bytes from the stream into the buffer.
+ * @param buf the buffer to read into
+ * @param buflen the length of the buffer
+ * @throws Error if there are problems reading
+ */
+ virtual void read(void *buf, size_t len) = 0;
+ virtual ~InStream() {}
+ };
+
+ /**
+ * An interface for an output stream.
+ */
+ class OutStream {
+ public:
+ /**
+ * Write the given buffer to the stream.
+ * @param buf the data to write
+ * @param len the number of bytes to write
+ * @throws Error if there are problems writing
+ */
+ virtual void write(const void *buf, size_t len) = 0;
+ /**
+ * Flush the data to the underlying store.
+ */
+ virtual void flush() = 0;
+ virtual ~OutStream() {}
+ };
+
+ /**
+ * A class to read a file as a stream.
+ */
+ class FileInStream : public InStream {
+ public:
+ FileInStream();
+ bool open(const std::string& name);
+ bool open(FILE* file);
+ void read(void *buf, size_t buflen);
+ bool skip(size_t nbytes);
+ bool close();
+ virtual ~FileInStream();
+ private:
+ /**
+     * The file to read from.
+ */
+ FILE *mFile;
+ /**
+     * Is this class responsible for closing the FILE*?
+ */
+ bool isOwned;
+ };
+
+ /**
+ * A class to write a stream to a file.
+ */
+ class FileOutStream: public OutStream {
+ public:
+
+ /**
+ * Create a stream that isn't bound to anything.
+ */
+ FileOutStream();
+
+ /**
+ * Create the given file, potentially overwriting an existing file.
+ */
+ bool open(const std::string& name, bool overwrite);
+ bool open(FILE* file);
+ void write(const void* buf, size_t len);
+ bool advance(size_t nbytes);
+ void flush();
+ bool close();
+ virtual ~FileOutStream();
+ private:
+ FILE *mFile;
+ bool isOwned;
+ };
+
+ /**
+ * A stream that reads from a string.
+ */
+ class StringInStream: public InStream {
+ public:
+ StringInStream(const std::string& str);
+ virtual void read(void *buf, size_t buflen);
+ private:
+ const std::string& buffer;
+ std::string::const_iterator itr;
+ };
+
+ void serializeInt(int32_t t, OutStream& stream);
+ int32_t deserializeInt(InStream& stream);
+ void serializeLong(int64_t t, OutStream& stream);
+ int64_t deserializeLong(InStream& stream);
+ void serializeFloat(float t, OutStream& stream);
+ float deserializeFloat(InStream& stream);
+ void serializeString(const std::string& t, OutStream& stream);
+ void deserializeString(std::string& t, InStream& stream);
+}
+
+#endif
diff --git a/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/StringUtils.hh b/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/StringUtils.hh
new file mode 100644
index 0000000000..4720172725
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/StringUtils.hh
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HADOOP_STRING_UTILS_HH
+#define HADOOP_STRING_UTILS_HH
+
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+namespace HadoopUtils {
+
+ /**
+ * Convert an integer to a string.
+ */
+ std::string toString(int32_t x);
+
+ /**
+ * Convert a string to an integer.
+ * @throws Error if the string is not a valid integer
+ */
+ int32_t toInt(const std::string& val);
+
+ /**
+ * Convert the string to a float.
+ * @throws Error if the string is not a valid float
+ */
+ float toFloat(const std::string& val);
+
+ /**
+ * Convert the string to a boolean.
+ * @throws Error if the string is not a valid boolean value
+ */
+ bool toBool(const std::string& val);
+
+ /**
+ * Get the current time in the number of milliseconds since 1970.
+ */
+ uint64_t getCurrentMillis();
+
+ /**
+ * Split a string into "words". Multiple deliminators are treated as a single
+ * word break, so no zero-length words are returned.
+ * @param str the string to split
+ * @param separator a list of characters that divide words
+ */
+ std::vector<std::string> splitString(const std::string& str,
+ const char* separator);
+
+ /**
+   * Quote a string to escape "\", non-printable characters, and the
+   * given delimiter characters.
+ * @param str the string to quote
+ * @param deliminators the set of characters to always quote
+ */
+ std::string quoteString(const std::string& str,
+ const char* deliminators);
+
+ /**
+ * Unquote the given string to return the original string.
+ * @param str the string to unquote
+ */
+ std::string unquoteString(const std::string& str);
+
+}
+
+#endif
diff --git a/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/TemplateFactory.hh b/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/TemplateFactory.hh
new file mode 100644
index 0000000000..22e10ae56f
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-i386-32/include/hadoop/TemplateFactory.hh
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef HADOOP_PIPES_TEMPLATE_FACTORY_HH
+#define HADOOP_PIPES_TEMPLATE_FACTORY_HH
+
+namespace HadoopPipes {
+
+ template <class mapper, class reducer>
+ class TemplateFactory2: public Factory {
+ public:
+ Mapper* createMapper(MapContext& context) const {
+ return new mapper(context);
+ }
+ Reducer* createReducer(ReduceContext& context) const {
+ return new reducer(context);
+ }
+ };
+
+ template <class mapper, class reducer, class partitioner>
+ class TemplateFactory3: public TemplateFactory2<mapper,reducer> {
+ public:
+ Partitioner* createPartitioner(MapContext& context) const {
+ return new partitioner(context);
+ }
+ };
+
+ template <class mapper, class reducer>
+ class TemplateFactory3<mapper, reducer, void>
+ : public TemplateFactory2<mapper,reducer> {
+ };
+
+ template <class mapper, class reducer, class partitioner, class combiner>
+ class TemplateFactory4
+ : public TemplateFactory3<mapper,reducer,partitioner>{
+ public:
+ Reducer* createCombiner(MapContext& context) const {
+ return new combiner(context);
+ }
+ };
+
+ template <class mapper, class reducer, class partitioner>
+ class TemplateFactory4<mapper,reducer,partitioner,void>
+ : public TemplateFactory3<mapper,reducer,partitioner>{
+ };
+
+ template <class mapper, class reducer, class partitioner,
+ class combiner, class recordReader>
+ class TemplateFactory5
+ : public TemplateFactory4<mapper,reducer,partitioner,combiner>{
+ public:
+ RecordReader* createRecordReader(MapContext& context) const {
+ return new recordReader(context);
+ }
+ };
+
+ template <class mapper, class reducer, class partitioner,class combiner>
+ class TemplateFactory5<mapper,reducer,partitioner,combiner,void>
+ : public TemplateFactory4<mapper,reducer,partitioner,combiner>{
+ };
+
+ template <class mapper, class reducer, class partitioner=void,
+ class combiner=void, class recordReader=void,
+ class recordWriter=void>
+ class TemplateFactory
+ : public TemplateFactory5<mapper,reducer,partitioner,combiner,recordReader>{
+ public:
+ RecordWriter* createRecordWriter(ReduceContext& context) const {
+ return new recordWriter(context);
+ }
+ };
+
+ template <class mapper, class reducer, class partitioner,
+ class combiner, class recordReader>
+ class TemplateFactory<mapper, reducer, partitioner, combiner, recordReader,
+ void>
+ : public TemplateFactory5<mapper,reducer,partitioner,combiner,recordReader>{
+ };
+
+}
+
+#endif
diff --git a/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhadooppipes.a b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhadooppipes.a
new file mode 100644
index 0000000000..73debcec92
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhadooppipes.a
Binary files differ
diff --git a/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhadooputils.a b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhadooputils.a
new file mode 100644
index 0000000000..6753169b8a
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhadooputils.a
Binary files differ
diff --git a/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.la b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.la
new file mode 100644
index 0000000000..b6ce94229f
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.la
@@ -0,0 +1,41 @@
+# libhdfs.la - a libtool library file
+# Generated by ltmain.sh (GNU libtool) 2.2
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='libhdfs.so.0'
+
+# Names of this library.
+library_names='libhdfs.so.0.0.0 libhdfs.so.0 libhdfs.so'
+
+# The name of the static archive.
+old_library=''
+
+# Linker flags that can not go in dependency_libs.
+inherited_linker_flags=''
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/home/hadoopqa/tools/java/latest1.6-32/jre/lib/i386/server -ljvm -ldl -lpthread'
+
+# Names of additional weak libraries provided by this library
+weak_library_names=''
+
+# Version information for libhdfs.
+current=0
+age=0
+revision=0
+
+# Is this an already installed library?
+installed=yes
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=no
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir='/home/ndaley/hadoop/branch-0.20/build/c++/Linux-i386-32/lib'
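
The dependency_libs line above is the practical detail worth noting: libhdfs drives HDFS through JNI, so anything linking against it must also link the JVM. A minimal client is sketched below, assuming hdfs.h from the Hadoop source tree (src/c++/libhdfs) is on the include path; the file name and the paths in the link line are illustrative:

    #include <fcntl.h>
    #include "hdfs.h"

    int main() {
      hdfsFS fs = hdfsConnect("default", 0);   /* use fs.default.name from config */
      if (fs == NULL) return 1;
      hdfsFile f = hdfsOpenFile(fs, "/tmp/hello.txt",
                                O_WRONLY | O_CREAT, 0, 0, 0);
      hdfsWrite(fs, f, (void*) "hello\n", 6);  /* write six bytes */
      hdfsCloseFile(fs, f);
      hdfsDisconnect(fs);
      return 0;
    }

    gcc hello.c -I$HADOOP_HOME/src/c++/libhdfs \
        -L$HADOOP_HOME/c++/Linux-i386-32/lib -lhdfs \
        -L$JAVA_HOME/jre/lib/i386/server -ljvm -ldl -lpthread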
diff --git a/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so
new file mode 100644
index 0000000000..358d582d43
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so
Binary files differ
diff --git a/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so.0 b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so.0
new file mode 100644
index 0000000000..358d582d43
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so.0
Binary files differ
diff --git a/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so.0.0.0 b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so.0.0.0
new file mode 100644
index 0000000000..358d582d43
--- /dev/null
+++ b/lib/hadoop-0.20.0/c++/Linux-i386-32/lib/libhdfs.so.0.0.0
Binary files differ
diff --git a/lib/hadoop-0.20.0/conf/capacity-scheduler.xml b/lib/hadoop-0.20.0/conf/capacity-scheduler.xml
new file mode 100644
index 0000000000..d22a3964b4
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/capacity-scheduler.xml
@@ -0,0 +1,156 @@
+<?xml version="1.0"?>
+
+<!-- This is the configuration file for the resource manager in Hadoop. -->
+<!-- You can configure various scheduling parameters related to queues. -->
+<!-- The properties for a queue follow a naming convention, such as -->
+<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
+
+<configuration>
+
+ <property>
+ <name>mapred.capacity-scheduler.queue.default.guaranteed-capacity</name>
+ <value>100</value>
+ <description>Percentage of the number of slots in the cluster that are
+ guaranteed to be available for jobs in this queue.
+ </description>
+ </property>
+
+ <property>
+ <name>mapred.capacity-scheduler.queue.default.reclaim-time-limit</name>
+ <value>300</value>
+    <description>The maximum amount of time, in seconds, within which
+    resources given away to other queues will be reclaimed.
+ </description>
+ </property>
+
+ <property>
+ <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
+ <value>false</value>
+ <description>If true, priorities of jobs will be taken into
+ account in scheduling decisions.
+ </description>
+ </property>
+
+ <property>
+ <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
+ <value>100</value>
+ <description> Each queue enforces a limit on the percentage of resources
+ allocated to a user at any given time, if there is competition for them.
+ This user limit can vary between a minimum and maximum value. The former
+ depends on the number of users who have submitted jobs, and the latter is
+ set to this property value. For example, suppose the value of this
+ property is 25. If two users have submitted jobs to a queue, no single
+ user can use more than 50% of the queue resources. If a third user submits
+ a job, no single user can use more than 33% of the queue resources. With 4
+ or more users, no user can use more than 25% of the queue's resources. A
+ value of 100 implies no user limits are imposed.
+ </description>
+ </property>
+ <property>
+ <name>mapred.capacity-scheduler.queue.default.maximum-initialized-jobs-per-user</name>
+ <value>2</value>
+ <description>The maximum number of jobs to be pre-initialized for a user
+ of the job queue.
+ </description>
+ </property>
+
+
+ <property>
+ <name>mapred.capacity-scheduler.reclaimCapacity.interval</name>
+ <value>5</value>
+    <description>The interval, in seconds, at which the scheduler
+    periodically checks whether capacity needs to be reclaimed for
+    any queue.
+ </description>
+ </property>
+
+ <!-- The default configuration settings for the capacity task scheduler -->
+  <!-- The default values are applied to all queues that don't set the -->
+  <!-- corresponding per-queue property -->
+ <property>
+ <name>mapred.capacity-scheduler.default-reclaim-time-limit</name>
+ <value>300</value>
+    <description>The default maximum amount of time, in seconds, within
+    which resources given away to other queues will be reclaimed
+    in a job queue.
+ </description>
+ </property>
+
+ <property>
+ <name>mapred.capacity-scheduler.default-supports-priority</name>
+ <value>false</value>
+ <description>If true, priorities of jobs will be taken into
+ account in scheduling decisions by default in a job queue.
+ </description>
+ </property>
+
+ <property>
+ <name>mapred.capacity-scheduler.task.default-pmem-percentage-in-vmem</name>
+ <value>-1</value>
+    <description>If mapred.task.maxpmem is set to -1, this configuration is
+    used to calculate a job's physical memory requirements as a percentage of
+    its virtual memory requirements set via mapred.task.maxvmem. This
+    property thus provides a default physical memory value for jobs that
+    don't explicitly specify physical memory requirements.
+
+    If not explicitly set to a valid value, the scheduler will not consider
+    physical memory for scheduling even if virtual memory based scheduling is
+    enabled (by setting valid values for both mapred.task.default.maxvmem and
+ mapred.task.limit.maxvmem).
+ </description>
+ </property>
+
+ <property>
+ <name>mapred.capacity-scheduler.task.limit.maxpmem</name>
+ <value>-1</value>
+ <description>Configuration that provides an upper limit on the maximum
+ physical memory that can be specified by a job. The job configuration
+ mapred.task.maxpmem should be less than this value. If not, the job will
+ be rejected by the scheduler.
+
+    If it is set to -1, the scheduler will not consider physical memory for
+    scheduling even if virtual memory based scheduling is enabled (by setting
+ valid values for both mapred.task.default.maxvmem and
+ mapred.task.limit.maxvmem).
+ </description>
+ </property>
+
+ <property>
+ <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
+ <value>100</value>
+    <description>The default limit, as a percentage, on the resources
+    available to a single user of a job queue at any given point in time.
+ </description>
+ </property>
+
+ <property>
+ <name>mapred.capacity-scheduler.default-maximum-initialized-jobs-per-user</name>
+ <value>2</value>
+ <description>The maximum number of jobs to be pre-initialized for a user
+ of the job queue.
+ </description>
+ </property>
+
+
+ <!-- Capacity scheduler Job Initialization configuration parameters -->
+ <property>
+ <name>mapred.capacity-scheduler.init-poll-interval</name>
+ <value>5000</value>
+    <description>The interval, in milliseconds, at which the job queues
+    are polled for jobs to initialize.
+ </description>
+ </property>
+ <property>
+ <name>mapred.capacity-scheduler.init-worker-threads</name>
+ <value>5</value>
+    <description>Number of worker threads used by the initialization
+    poller to initialize jobs in a set of queues. If this number equals
+    the number of job queues, each thread initializes jobs in exactly
+    one queue. If it is smaller, each thread is assigned a set of
+    queues. If it is larger, the number of threads is capped at the
+    number of job queues.
+ </description>
+ </property>
+
+</configuration>
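
Per the naming convention noted at the top of this file, the same keys configure any additional queue once the queue name is substituted. A hypothetical second queue (the name "research" is illustrative; in this Hadoop version the queue list itself is declared via mapred.queue.names) might add:

    <property>
      <name>mapred.capacity-scheduler.queue.research.guaranteed-capacity</name>
      <value>20</value>
    </property>
    <property>
      <name>mapred.capacity-scheduler.queue.research.supports-priority</name>
      <value>true</value>
    </property>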
diff --git a/lib/hadoop-0.20.0/conf/configuration.xsl b/lib/hadoop-0.20.0/conf/configuration.xsl
new file mode 100644
index 0000000000..377cdbeb93
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/configuration.xsl
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+<tr>
+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+ <td><xsl:value-of select="value"/></td>
+ <td><xsl:value-of select="description"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>
diff --git a/lib/hadoop-0.20.0/conf/core-site.xml b/lib/hadoop-0.20.0/conf/core-site.xml
new file mode 100644
index 0000000000..970c8fe0e8
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/core-site.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+</configuration>
diff --git a/lib/hadoop-0.20.0/conf/hadoop-env.sh b/lib/hadoop-0.20.0/conf/hadoop-env.sh
new file mode 100644
index 0000000000..ada5bef1c7
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/hadoop-env.sh
@@ -0,0 +1,54 @@
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use. Required.
+# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+
+# Extra Java CLASSPATH elements. Optional.
+# export HADOOP_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HADOOP_HEAPSIZE=2000
+
+# Extra Java runtime options. Empty by default.
+# export HADOOP_OPTS=-server
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
+export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
+export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
+export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
+# export HADOOP_TASKTRACKER_OPTS=
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+# export HADOOP_CLIENT_OPTS
+
+# Extra ssh options. Empty by default.
+# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored. $HADOOP_HOME/logs by default.
+# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
+
+# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from. Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+# export HADOOP_PID_DIR=/var/hadoop/pids
+
+# A string representing this instance of hadoop. $USER by default.
+# export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HADOOP_NICENESS=10
diff --git a/lib/hadoop-0.20.0/conf/hadoop-metrics.properties b/lib/hadoop-0.20.0/conf/hadoop-metrics.properties
new file mode 100644
index 0000000000..d04dffc438
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/hadoop-metrics.properties
@@ -0,0 +1,40 @@
+# Configuration of the "dfs" context for null
+dfs.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "dfs" context for file
+#dfs.class=org.apache.hadoop.metrics.file.FileContext
+#dfs.period=10
+#dfs.fileName=/tmp/dfsmetrics.log
+
+# Configuration of the "dfs" context for ganglia
+# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# dfs.period=10
+# dfs.servers=localhost:8649
+
+
+# Configuration of the "mapred" context for null
+mapred.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "mapred" context for file
+#mapred.class=org.apache.hadoop.metrics.file.FileContext
+#mapred.period=10
+#mapred.fileName=/tmp/mrmetrics.log
+
+# Configuration of the "mapred" context for ganglia
+# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# mapred.period=10
+# mapred.servers=localhost:8649
+
+
+# Configuration of the "jvm" context for null
+jvm.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "jvm" context for file
+#jvm.class=org.apache.hadoop.metrics.file.FileContext
+#jvm.period=10
+#jvm.fileName=/tmp/jvmmetrics.log
+
+# Configuration of the "jvm" context for ganglia
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.period=10
+# jvm.servers=localhost:8649
diff --git a/lib/hadoop-0.20.0/conf/hadoop-policy.xml b/lib/hadoop-0.20.0/conf/hadoop-policy.xml
new file mode 100644
index 0000000000..ef48f2bbed
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/hadoop-policy.xml
@@ -0,0 +1,97 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>security.client.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ClientProtocol, which is used by user code
+ via the DistributedFileSystem.
+ The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.client.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+ for block recovery.
+ The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for DatanodeProtocol, which is used by datanodes to
+ communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.inter.datanode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+ for updating generation timestamp.
+ The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.namenode.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for NamenodeProtocol, the protocol used by the secondary
+ namenode to communicate with the namenode.
+ The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.inter.tracker.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for InterTrackerProtocol, used by the tasktrackers to
+ communicate with the jobtracker.
+ The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.job.submission.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for JobSubmissionProtocol, used by job clients to
+    communicate with the jobtracker for job submission, querying job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.task.umbilical.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+ tasks to communicate with the parent tasktracker.
+ The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+ <property>
+ <name>security.refresh.policy.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+ A special value of "*" means all users are allowed.</description>
+ </property>
+
+</configuration>
diff --git a/lib/hadoop-0.20.0/conf/hdfs-site.xml b/lib/hadoop-0.20.0/conf/hdfs-site.xml
new file mode 100644
index 0000000000..970c8fe0e8
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/hdfs-site.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+</configuration>
diff --git a/lib/hadoop-0.20.0/conf/log4j.properties b/lib/hadoop-0.20.0/conf/log4j.properties
new file mode 100644
index 0000000000..d797df6dab
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/log4j.properties
@@ -0,0 +1,94 @@
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+#
+# FSNamesystem Audit logging
+# All audit events are logged at INFO level
+#
+log4j.logger.org.apache.hadoop.fs.FSNamesystem.audit=WARN
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter
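
Because the first stanza only sets defaults, the root logger can be overridden per invocation instead of editing this file. The stock launcher scripts pass HADOOP_ROOT_LOGGER through as -Dhadoop.root.logger, so a one-off debug run looks roughly like:

    HADOOP_ROOT_LOGGER=DEBUG,console bin/hadoop fs -ls /

(Daemons started via hadoop-daemon.sh are pinned to INFO,DRFA, so they log through the rolling file appender defined above.)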
diff --git a/lib/hadoop-0.20.0/conf/mapred-site.xml b/lib/hadoop-0.20.0/conf/mapred-site.xml
new file mode 100644
index 0000000000..970c8fe0e8
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/mapred-site.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+</configuration>
diff --git a/lib/hadoop-0.20.0/conf/masters b/lib/hadoop-0.20.0/conf/masters
new file mode 100644
index 0000000000..2fbb50c4a8
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/masters
@@ -0,0 +1 @@
+localhost
diff --git a/lib/hadoop-0.20.0/conf/slaves b/lib/hadoop-0.20.0/conf/slaves
new file mode 100644
index 0000000000..2fbb50c4a8
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/slaves
@@ -0,0 +1 @@
+localhost
diff --git a/lib/hadoop-0.20.0/conf/ssl-client.xml.example b/lib/hadoop-0.20.0/conf/ssl-client.xml.example
new file mode 100644
index 0000000000..ec3fd41fa8
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/ssl-client.xml.example
@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+<property>
+ <name>ssl.client.truststore.location</name>
+ <value></value>
+ <description>Truststore to be used by clients like distcp. Must be
+ specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.truststore.password</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.truststore.type</name>
+ <value>jks</value>
+ <description>Optional. Default value is "jks".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.location</name>
+ <value></value>
+ <description>Keystore to be used by clients like distcp. Must be
+ specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.password</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.keypassword</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.client.keystore.type</name>
+ <value>jks</value>
+ <description>Optional. Default value is "jks".
+ </description>
+</property>
+
+</configuration>
diff --git a/lib/hadoop-0.20.0/conf/ssl-server.xml.example b/lib/hadoop-0.20.0/conf/ssl-server.xml.example
new file mode 100644
index 0000000000..22e9cb0ebb
--- /dev/null
+++ b/lib/hadoop-0.20.0/conf/ssl-server.xml.example
@@ -0,0 +1,55 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+<property>
+ <name>ssl.server.truststore.location</name>
+ <value></value>
+ <description>Truststore to be used by NN and DN. Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.truststore.password</name>
+ <value></value>
+ <description>Optional. Default value is "".
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.truststore.type</name>
+ <value>jks</value>
+ <description>Optional. Default value is "jks".
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.location</name>
+ <value></value>
+ <description>Keystore to be used by NN and DN. Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.password</name>
+ <value></value>
+ <description>Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.keypassword</name>
+ <value></value>
+ <description>Must be specified.
+ </description>
+</property>
+
+<property>
+ <name>ssl.server.keystore.type</name>
+ <value>jks</value>
+ <description>Optional. Default value is "jks".
+ </description>
+</property>
+
+</configuration>
diff --git a/lib/hadoop-0.20.0/contrib/capacity-scheduler/hadoop-0.20.0-capacity-scheduler.jar b/lib/hadoop-0.20.0/contrib/capacity-scheduler/hadoop-0.20.0-capacity-scheduler.jar
new file mode 100644
index 0000000000..b4900e565e
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/capacity-scheduler/hadoop-0.20.0-capacity-scheduler.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/contrib/datajoin/hadoop-0.20.0-datajoin.jar b/lib/hadoop-0.20.0/contrib/datajoin/hadoop-0.20.0-datajoin.jar
new file mode 100644
index 0000000000..21294d4d1d
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/datajoin/hadoop-0.20.0-datajoin.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/contrib/eclipse-plugin/hadoop-0.20.0-eclipse-plugin.jar b/lib/hadoop-0.20.0/contrib/eclipse-plugin/hadoop-0.20.0-eclipse-plugin.jar
new file mode 100644
index 0000000000..7b316393f6
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/eclipse-plugin/hadoop-0.20.0-eclipse-plugin.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/contrib/fairscheduler/hadoop-0.20.0-fairscheduler.jar b/lib/hadoop-0.20.0/contrib/fairscheduler/hadoop-0.20.0-fairscheduler.jar
new file mode 100644
index 0000000000..758b98367c
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/fairscheduler/hadoop-0.20.0-fairscheduler.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/README b/lib/hadoop-0.20.0/contrib/hdfsproxy/README
new file mode 100644
index 0000000000..2c33988926
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/README
@@ -0,0 +1,30 @@
+HDFSPROXY is an HTTPS proxy server that exposes the same HSFTP interface as a
+real cluster. It authenticates users via user certificates and enforces access
+control based on configuration files.
+
+Starting up an HDFSPROXY server is similar to starting up an HDFS cluster.
+Simply run "hdfsproxy" shell command. The main configuration file is
+hdfsproxy-default.xml, which should be on the classpath. hdfsproxy-env.sh
+can be used to set up environment variables. In particular, JAVA_HOME should
+be set. Additional configuration files include user-certs.xml,
+user-permissions.xml and ssl-server.xml, which are used to specify allowed user
+certs, allowed directories/files, and ssl keystore information for the proxy,
+respectively. The location of these files can be specified in
+hdfsproxy-default.xml. The environment variable HDFSPROXY_CONF_DIR can be used to
+point to the directory where these configuration files are located. The
+configuration files of the proxied HDFS cluster should also be available on the
+classpath (hdfs-default.xml and hdfs-site.xml).
+
+Mirroring those used in HDFS, a few shell scripts are provided to start and
+stop a group of proxy servers. The hosts to run hdfsproxy on are listed in the
+hdfsproxy-hosts file, one host per line. All hdfsproxy servers are stateless
+and run independently from each other. Simple load balancing can be set up by
+mapping all hdfsproxy server IP addresses to a single hostname. Users should
+use that hostname to access the proxy. If an IP address lookup for that
+hostname returns more than one IP address, an HFTP/HSFTP client will randomly
+pick one to use.
+
+Command "hdfsproxy -reloadPermFiles" can be used to trigger reloading of
+user-certs.xml and user-permissions.xml files on all proxy servers listed in
+the hdfsproxy-hosts file. Similarly, "hdfsproxy -clearUgiCache" command can be
+used to clear the UGI caches on all proxy servers.
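
Putting the commands above together, a typical administration session might look like the following sketch (the conf directory and paths are illustrative; start-hdfsproxy.sh and stop-hdfsproxy.sh are added later in this change):

    export HDFSPROXY_CONF_DIR=/etc/hdfsproxy
    bin/start-hdfsproxy.sh          # starts a proxy on every host in hdfsproxy-hosts

    # after editing user-certs.xml or user-permissions.xml:
    bin/hdfsproxy -reloadPermFiles

    # drop cached UGIs so permission changes take effect immediately:
    bin/hdfsproxy -clearUgiCache

    bin/stop-hdfsproxy.sh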
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy
new file mode 100755
index 0000000000..1b1e597891
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy
@@ -0,0 +1,170 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# The HdfsProxy command script
+#
+# Environment Variables
+#
+#   JAVA_HOME        The java implementation to use.  Required.
+#
+# HDFSPROXY_CLASSPATH Extra Java CLASSPATH entries.
+#
+# HDFSPROXY_HEAPSIZE The maximum amount of heap to use, in MB.
+# Default is 1000.
+#
+# HDFSPROXY_OPTS Extra Java runtime options.
+#
+# HDFSPROXY_NAMENODE_OPTS These options are added to HDFSPROXY_OPTS
+# HDFSPROXY_CLIENT_OPTS when the respective command is run.
+#   HDFSPROXY_{COMMAND}_OPTS etc  For example, HDFSPROXY_CLIENT_OPTS applies
+#                                 to more than one command (fs, dfs, fsck,
+#                                 dfsadmin etc)
+#
+# HDFSPROXY_CONF_DIR Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
+#
+# HDFSPROXY_ROOT_LOGGER The root appender. Default is INFO,console
+#
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfsproxy-config.sh
+
+cygwin=false
+case "`uname`" in
+CYGWIN*) cygwin=true;;
+esac
+
+if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
+ . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
+fi
+
+# some Java parameters
+if [ "$JAVA_HOME" != "" ]; then
+ #echo "run java in $JAVA_HOME"
+ JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+ echo "Error: JAVA_HOME is not set."
+ exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# check envvars which might override default args
+if [ "$HDFSPROXY_HEAPSIZE" != "" ]; then
+ #echo "run with heapsize $HDFSPROXY_HEAPSIZE"
+ JAVA_HEAP_MAX="-Xmx""$HDFSPROXY_HEAPSIZE""m"
+ #echo $JAVA_HEAP_MAX
+fi
+
+# CLASSPATH initially contains $HDFSPROXY_CONF_DIR
+CLASSPATH="${HDFSPROXY_CONF_DIR}"
+CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
+
+# for developers, add HdfsProxy classes to CLASSPATH
+if [ -d "$HDFSPROXY_HOME/build/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/classes
+fi
+if [ -d "$HDFSPROXY_HOME/build/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build
+fi
+if [ -d "$HDFSPROXY_HOME/build/test/classes" ]; then
+ CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/test/classes
+fi
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+# for releases, add hdfsproxy jar & webapps to CLASSPATH
+if [ -d "$HDFSPROXY_HOME/webapps" ]; then
+ CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME
+fi
+for f in $HDFSPROXY_HOME/hdfsproxy-*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add libs to CLASSPATH
+if [ -d "$HDFSPROXY_HOME/lib" ]; then
+ for f in $HDFSPROXY_HOME/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+fi
+
+if [ -d "$HDFSPROXY_HOME/../../" ]; then
+ for f in $HDFSPROXY_HOME/../../*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+fi
+if [ -d "$HDFSPROXY_HOME/../../lib" ]; then
+ for f in $HDFSPROXY_HOME/../../lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+fi
+if [ -d "$HDFSPROXY_HOME/../../lib/jsp-2.1" ]; then
+ for f in $HDFSPROXY_HOME/../../lib/jsp-2.1/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+fi
+
+
+# add user-specified CLASSPATH last
+if [ "$HDFSPROXY_CLASSPATH" != "" ]; then
+ CLASSPATH=${CLASSPATH}:${HDFSPROXY_CLASSPATH}
+fi
+
+# default log directory & file
+if [ "$HDFSPROXY_LOG_DIR" = "" ]; then
+ HDFSPROXY_LOG_DIR="$HDFSPROXY_HOME/logs"
+fi
+if [ "$HDFSPROXY_LOGFILE" = "" ]; then
+ HDFSPROXY_LOGFILE='hdfsproxy.log'
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+# figure out which class to run
+CLASS='org.apache.hadoop.hdfsproxy.HdfsProxy'
+
+# cygwin path translation
+if $cygwin; then
+ CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+ HDFSPROXY_HOME=`cygpath -d "$HDFSPROXY_HOME"`
+ HDFSPROXY_LOG_DIR=`cygpath -d "$HDFSPROXY_LOG_DIR"`
+fi
+
+# cygwin path translation
+if $cygwin; then
+ JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
+fi
+
+HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.log.dir=$HDFSPROXY_LOG_DIR"
+HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.log.file=$HDFSPROXY_LOGFILE"
+HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.home.dir=$HDFSPROXY_HOME"
+HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.id.str=$HDFSPROXY_IDENT_STRING"
+HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.root.logger=${HDFSPROXY_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+ HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+
+# run it
+exec "$JAVA" $JAVA_HEAP_MAX $HDFSPROXY_OPTS -classpath "$CLASSPATH" $CLASS "$@"
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-config.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-config.sh
new file mode 100755
index 0000000000..8fe6aac68b
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-config.sh
@@ -0,0 +1,67 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# included in all the hadoop scripts with source command
+# should not be executable directly
+# also should not be passed any arguments, since we need original $*
+
+# resolve links - $0 may be a softlink
+
+this="$0"
+while [ -h "$this" ]; do
+ ls=`ls -ld "$this"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '.*/.*' > /dev/null; then
+ this="$link"
+ else
+ this=`dirname "$this"`/"$link"
+ fi
+done
+
+# convert relative path to absolute path
+bin=`dirname "$this"`
+script=`basename "$this"`
+bin=`cd "$bin"; pwd`
+this="$bin/$script"
+
+# the root of the HdfsProxy installation
+export HDFSPROXY_HOME=`dirname "$this"`/..
+
+#check to see if the conf dir is given as an optional argument
+if [ $# -gt 1 ]
+then
+ if [ "--config" = "$1" ]
+ then
+ shift
+ confdir=$1
+ shift
+ HDFSPROXY_CONF_DIR=$confdir
+ fi
+fi
+
+# Allow alternate conf dir location.
+HDFSPROXY_CONF_DIR="${HDFSPROXY_CONF_DIR:-$HDFSPROXY_HOME/conf}"
+
+# check to see whether a slaves file is specified as an optional argument
+if [ $# -gt 1 ]
+then
+ if [ "--hosts" = "$1" ]
+ then
+ shift
+ slavesfile=$1
+ shift
+ export HDFSPROXY_SLAVES="${HDFSPROXY_CONF_DIR}/$slavesfile"
+ fi
+fi
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh
new file mode 100755
index 0000000000..6d5a75247f
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh
@@ -0,0 +1,141 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs an HdfsProxy as a daemon.
+#
+# Environment Variables
+#
+# HDFSPROXY_CONF_DIR Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
+# HDFSPROXY_LOG_DIR Where log files are stored. PWD by default.
+# HDFSPROXY_MASTER host:path where hdfsproxy code should be rsync'd from
+#   HDFSPROXY_PID_DIR   Where the pid files are stored. /tmp by default.
+# HDFSPROXY_IDENT_STRING A string representing this instance of hdfsproxy. $USER by default
+# HDFSPROXY_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: hdfsproxy-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) "
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfsproxy-config.sh
+
+# get arguments
+startStop=$1
+shift
+
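+# Rotate $1: shift $1.4 -> $1.5, ..., $1.1 -> $1.2, then $1 -> $1.1,
+# keeping at most $2 (default 5) old copies.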
+hdfsproxy_rotate_log ()
+{
+ log=$1;
+ num=5;
+ if [ -n "$2" ]; then
+ num=$2
+ fi
+ if [ -f "$log" ]; then # rotate logs
+ while [ $num -gt 1 ]; do
+ prev=`expr $num - 1`
+ [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+ num=$prev
+ done
+ mv "$log" "$log.$num";
+ fi
+}
+
+if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
+ . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
+fi
+
+# get log directory
+if [ "$HDFSPROXY_LOG_DIR" = "" ]; then
+ export HDFSPROXY_LOG_DIR="$HDFSPROXY_HOME/logs"
+fi
+mkdir -p "$HDFSPROXY_LOG_DIR"
+
+if [ "$HDFSPROXY_PID_DIR" = "" ]; then
+ HDFSPROXY_PID_DIR=/tmp
+fi
+
+if [ "$HDFSPROXY_IDENT_STRING" = "" ]; then
+ export HDFSPROXY_IDENT_STRING="$USER"
+fi
+
+# some variables
+export HDFSPROXY_LOGFILE=hdfsproxy-$HDFSPROXY_IDENT_STRING-$HOSTNAME.log
+export HDFSPROXY_ROOT_LOGGER="INFO,DRFA"
+log=$HDFSPROXY_LOG_DIR/hdfsproxy-$HDFSPROXY_IDENT_STRING-$HOSTNAME.out
+pid=$HDFSPROXY_PID_DIR/hdfsproxy-$HDFSPROXY_IDENT_STRING.pid
+
+# Set default scheduling priority
+if [ "$HDFSPROXY_NICENESS" = "" ]; then
+ export HDFSPROXY_NICENESS=0
+fi
+
+case $startStop in
+
+ (start)
+
+ mkdir -p "$HDFSPROXY_PID_DIR"
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo hdfsproxy running as process `cat $pid`. Stop it first.
+ exit 1
+ fi
+ fi
+
+ if [ "$HDFSPROXY_MASTER" != "" ]; then
+ echo rsync from $HDFSPROXY_MASTER
+ rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HDFSPROXY_MASTER/ "$HDFSPROXY_HOME"
+ fi
+
+ hdfsproxy_rotate_log $log
+ echo starting hdfsproxy, logging to $log
+ cd "$HDFSPROXY_HOME"
+ nohup nice -n $HDFSPROXY_NICENESS "$HDFSPROXY_HOME"/bin/hdfsproxy --config $HDFSPROXY_CONF_DIR "$@" > "$log" 2>&1 < /dev/null &
+ echo $! > $pid
+ sleep 1; head "$log"
+ ;;
+
+ (stop)
+
+ if [ -f $pid ]; then
+ if kill -0 `cat $pid` > /dev/null 2>&1; then
+ echo stopping hdfsproxy
+ kill `cat $pid`
+ else
+ echo no hdfsproxy to stop
+ fi
+ else
+ echo no hdfsproxy to stop
+ fi
+ ;;
+
+ (*)
+ echo $usage
+ exit 1
+ ;;
+
+esac
+
+
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh
new file mode 100755
index 0000000000..7dd8568a3b
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run an HdfsProxy command on all slave hosts.
+
+usage="Usage: hdfsproxy-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] "
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. $bin/hdfsproxy-config.sh
+
+exec "$bin/hdfsproxy-slaves.sh" --config $HDFSPROXY_CONF_DIR cd "$HDFSPROXY_HOME" \; "$bin/hdfsproxy-daemon.sh" --config $HDFSPROXY_CONF_DIR "$@"
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh
new file mode 100755
index 0000000000..db54bd5b38
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Run a shell command on all slave hosts.
+#
+# Environment Variables
+#
+# HDFSPROXY_SLAVES File naming remote hosts.
+# Default is ${HDFSPROXY_CONF_DIR}/hdfsproxy-hosts.
+# HDFSPROXY_CONF_DIR Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
+# HDFSPROXY_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+# HDFSPROXY_SSH_OPTS Options passed to ssh when running remote commands.
+##
+
+usage="Usage: hdfsproxy-slaves.sh [--config confdir] command..."
+
+# if no args specified, show usage
+if [ $# -le 0 ]; then
+ echo $usage
+ exit 1
+fi
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfsproxy-config.sh
+
+# If the slaves file is specified in the command line,
+# then it takes precedence over the definition in
+# hdfsproxy-env.sh. Save it here.
+HOSTLIST=$HDFSPROXY_SLAVES
+
+if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
+ . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
+fi
+
+if [ "$HOSTLIST" = "" ]; then
+ if [ "$HDFSPROXY_SLAVES" = "" ]; then
+ export HOSTLIST="${HDFSPROXY_CONF_DIR}/hdfsproxy-hosts"
+ else
+ export HOSTLIST="${HDFSPROXY_SLAVES}"
+ fi
+fi
+
+for slave in `cat "$HOSTLIST"`; do
+ ssh $HDFSPROXY_SSH_OPTS $slave $"${@// /\\ }" \
+ 2>&1 | sed "s/^/$slave: /" &
+ if [ "$HDFSPROXY_SLAVE_SLEEP" != "" ]; then
+ sleep $HDFSPROXY_SLAVE_SLEEP
+ fi
+done
+
+wait
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/start-hdfsproxy.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/start-hdfsproxy.sh
new file mode 100755
index 0000000000..2592d9c8cc
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/start-hdfsproxy.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start hdfsproxy daemons.
+# Run this on master node.
+
+usage="Usage: start-hdfsproxy.sh"
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfsproxy-config.sh
+
+# get arguments
+if [ $# -ge 1 ]; then
+ echo $usage
+ exit 1
+fi
+
+# start hdfsproxy daemons
+# "$bin"/hdfsproxy-daemon.sh --config $HDFSPROXY_CONF_DIR start
+"$bin"/hdfsproxy-daemons.sh --config $HDFSPROXY_CONF_DIR --hosts hdfsproxy-hosts start
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/stop-hdfsproxy.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/stop-hdfsproxy.sh
new file mode 100755
index 0000000000..78089e31cf
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/bin/stop-hdfsproxy.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop hdfsproxy daemons. Run this on master node.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfsproxy-config.sh
+
+# "$bin"/hdfsproxy-daemon.sh --config $HDFSPROXY_CONF_DIR stop
+"$bin"/hdfsproxy-daemons.sh --config $HDFSPROXY_CONF_DIR --hosts hdfsproxy-hosts stop
+
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/build.xml b/lib/hadoop-0.20.0/contrib/hdfsproxy/build.xml
new file mode 100644
index 0000000000..e62b2f279a
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/build.xml
@@ -0,0 +1,183 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<project name="hdfsproxy" default="jar">
+ <property name="hdfsproxyVersion" value="1.0"/>
+ <property name="final.name" value="${ant.project.name}-${hdfsproxyVersion}"/>
+ <property name="bin.dir" value="${basedir}/bin"/>
+ <property name="lib.dir" value="${basedir}/lib"/>
+ <property name="conf.dir" value="${basedir}/conf"/>
+ <property name="docs.dir" value="${basedir}/docs"/>
+ <import file="../build-contrib.xml"/>
+
+ <target name="jar" depends="compile" description="Create jar">
+ <echo>
+ Building the .jar files.
+ </echo>
+ <jar jarfile="${build.dir}/${final.name}.jar" basedir="${build.classes}" includes="org/apache/hadoop/hdfsproxy/**/*.class" >
+ <manifest>
+ <section name="org/apache/hadoop/hdfsproxy">
+ <attribute name="Implementation-Title" value="HdfsProxy"/>
+ <attribute name="Implementation-Version" value="${hdfsproxyVersion}"/>
+ <attribute name="Implementation-Vendor" value="Apache"/>
+ </section>
+ </manifest>
+
+ </jar>
+ </target>
+
+ <!-- ====================================================== -->
+ <!-- Macro definitions -->
+ <!-- ====================================================== -->
+ <macrodef name="macro_tar" description="Worker Macro for tar">
+ <attribute name="param.destfile"/>
+ <element name="param.listofitems"/>
+ <sequential>
+ <tar compression="gzip" longfile="gnu"
+ destfile="@{param.destfile}">
+ <param.listofitems/>
+ </tar>
+ </sequential>
+ </macrodef>
+
+ <!-- ================================================================== -->
+ <!-- D I S T R I B U T I O N -->
+ <!-- ================================================================== -->
+ <!-- -->
+ <!-- ================================================================== -->
+ <target name="local-package" depends="jar" description="Package in local build directory">
+ <mkdir dir="${build.dir}/${final.name}"/>
+ <mkdir dir="${build.dir}/${final.name}/logs"/>
+ <copy todir="${build.dir}/${final.name}" includeEmptyDirs="false">
+ <fileset dir="${build.dir}">
+ <include name="*.jar" />
+ <include name="*.war" />
+ </fileset>
+ </copy>
+ <copy todir="${build.dir}/${final.name}/lib" includeEmptyDirs="false">
+ <fileset dir="${common.ivy.lib.dir}">
+        <include name="commons-logging-${commons-logging.version}.jar"/>
+ <include name="commons-logging-api-${commons-logging-api.version}.jar"/>
+ <include name="junit-${junit.version}.jar"/>
+ <include name="log4j-${log4j.version}.jar"/>
+ <include name="slf4j-api-${slf4j-api.version}.jar"/>
+ <include name="slf4j-log4j${slf4j-log4j.version}.jar"/>
+ <include name="xmlenc-${xmlenc.version}.jar"/>
+ <include name="jetty-${jetty.version}.jar"/>
+ <include name="servlet-api-${servlet-api-2.5.version}.jar"/>
+        <include name="core-${core.version}.jar"/>
+ </fileset>
+ <fileset dir="${hadoop.root}/lib/jsp-${jsp.version}">
+        <include name="jsp-${jsp.version}.jar"/>
+        <include name="jsp-api-${jsp-api.version}.jar"/>
+ </fileset>
+ </copy>
+
+ <copy todir="${build.dir}/${final.name}/lib" includeEmptyDirs="false">
+ <fileset dir="${hadoop.root}/build">
+ <include name="*-core.jar"/>
+ <include name="*-tools.jar"/>
+ </fileset>
+ </copy>
+
+ <copy todir="${build.dir}/${final.name}/bin">
+ <fileset dir="${bin.dir}"/>
+ </copy>
+
+ <copy todir="${build.dir}/${final.name}/conf">
+ <fileset dir="${conf.dir}"/>
+ </copy>
+
+ <copy todir="${build.dir}/${final.name}">
+ <fileset dir="${basedir}">
+ <include name="README" />
+ <include name="build.xml" />
+ <include name="*.txt" />
+ </fileset>
+ </copy>
+
+ <copy todir="${build.dir}/${final.name}/src" includeEmptyDirs="true">
+ <fileset dir="${src.dir}" excludes="**/*.template **/docs/build/**/*"/>
+ </copy>
+
+ <chmod perm="ugo+x" type="file" parallel="false">
+ <fileset dir="${build.dir}/${final.name}/bin"/>
+ </chmod>
+
+ </target>
+
+ <target name="package" depends="local-package" description="Build distribution">
+ <mkdir dir="${dist.dir}/contrib/${name}"/>
+ <copy todir="${dist.dir}/contrib/${name}">
+ <fileset dir="${build.dir}/${final.name}">
+ <exclude name="**/lib/**" />
+ <exclude name="**/src/**" />
+ </fileset>
+ </copy>
+ <chmod dir="${dist.dir}/contrib/${name}/bin" perm="a+x" includes="*"/>
+ </target>
+
+ <!-- ================================================================== -->
+ <!-- Make release tarball -->
+ <!-- ================================================================== -->
+ <target name="tar" depends="local-package" description="Make release tarball">
+ <macro_tar param.destfile="${build.dir}/${final.name}.tar.gz">
+ <param.listofitems>
+ <tarfileset dir="${build.dir}" mode="664">
+ <exclude name="${final.name}/bin/*" />
+ <include name="${final.name}/**" />
+ </tarfileset>
+ <tarfileset dir="${build.dir}" mode="755">
+ <include name="${final.name}/bin/*" />
+ </tarfileset>
+ </param.listofitems>
+ </macro_tar>
+ </target>
+
+ <target name="binary" depends="local-package" description="Make tarball without source and documentation">
+ <macro_tar param.destfile="${build.dir}/${final.name}-bin.tar.gz">
+ <param.listofitems>
+ <tarfileset dir="${build.dir}" mode="664">
+ <exclude name="${final.name}/bin/*" />
+ <exclude name="${final.name}/src/**" />
+ <exclude name="${final.name}/docs/**" />
+ <include name="${final.name}/**" />
+ </tarfileset>
+ <tarfileset dir="${build.dir}" mode="755">
+ <include name="${final.name}/bin/*" />
+ </tarfileset>
+ </param.listofitems>
+ </macro_tar>
+ </target>
+
+ <!-- the unit test classpath -->
+ <path id="test.classpath">
+ <pathelement location="${build.test}" />
+ <pathelement location="${hadoop.root}/build/test/classes"/>
+ <pathelement location="${hadoop.root}/src/contrib/test"/>
+ <pathelement location="${hadoop.root}/conf"/>
+ <pathelement location="${hadoop.root}/build"/>
+ <pathelement location="${hadoop.root}/build/classes"/>
+ <pathelement location="${hadoop.root}/build/tools"/>
+ <pathelement location="${build.examples}"/>
+ <path refid="contrib-classpath"/>
+ </path>
+
+
+</project>
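
For reference, the targets above are meant to be driven through Ant from within the Hadoop contrib tree, since the imported ../build-contrib.xml supplies ${hadoop.root}, ${build.dir} and related properties; roughly:

    ant jar            # builds hdfsproxy-1.0.jar
    ant local-package  # stages the jar, bin/ and conf/ under build/
    ant tar            # full release tarball
    ant binary         # tarball without src/ and docs/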
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/configuration.xsl b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/configuration.xsl
new file mode 100644
index 0000000000..377cdbeb93
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/configuration.xsl
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+<tr>
+ <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+ <td><xsl:value-of select="value"/></td>
+ <td><xsl:value-of select="description"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-default.xml b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-default.xml
new file mode 100644
index 0000000000..0d2a006c8e
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-default.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put hdfsproxy specific properties in this file. -->
+
+<configuration>
+
+<property>
+ <name>hdfsproxy.https.address</name>
+ <value>0.0.0.0:50479</value>
+ <description>the SSL port that hdfsproxy listens on
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.hosts</name>
+ <value>hdfsproxy-hosts</value>
+ <description>location of hdfsproxy-hosts file
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.dfs.namenode.address</name>
+ <value></value>
+ <description>namenode address of the HDFS cluster being proxied
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.https.server.keystore.resource</name>
+ <value>ssl-server.xml</value>
+ <description>location of the resource from which ssl server keystore
+ information will be extracted
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.user.permissions.file.location</name>
+ <value>user-permissions.xml</value>
+ <description>location of the user permissions file
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.user.certs.file.location</name>
+ <value>user-certs.xml</value>
+ <description>location of the user certs file
+ </description>
+</property>
+
+<property>
+ <name>hdfsproxy.ugi.cache.ugi.lifetime</name>
+ <value>15</value>
+ <description> The lifetime (in minutes) of a cached ugi
+ </description>
+</property>
+
+</configuration>
+
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh
new file mode 100644
index 0000000000..a0ff7a5d27
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh
@@ -0,0 +1,44 @@
+# Set HdfsProxy-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use. Required.
+# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+
+# Extra Java CLASSPATH elements. Optional.
+# export HDFSPROXY_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HDFSPROXY_HEAPSIZE=2000
+
+# Extra Java runtime options. Empty by default.
+# export HDFSPROXY_OPTS=
+
+# Extra ssh options. Empty by default.
+# export HDFSPROXY_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HDFSPROXY_CONF_DIR"
+
+# Where log files are stored. $HDFSPROXY_HOME/logs by default.
+# export HDFSPROXY_LOG_DIR=${HDFSPROXY_HOME}/logs
+
+# File naming remote slave hosts. $HDFSPROXY_HOME/conf/slaves by default.
+# export HDFSPROXY_SLAVES=${HDFSPROXY_HOME}/conf/slaves
+
+# host:path where hdfsproxy code should be rsync'd from. Unset by default.
+# export HDFSPROXY_MASTER=master:/home/$USER/src/hdfsproxy
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HDFSPROXY_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+# export HDFSPROXY_PID_DIR=/var/hdfsproxy/pids
+
+# A string representing this instance of hdfsproxy. $USER by default.
+# export HDFSPROXY_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HDFSPROXY_NICENESS=10
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template
new file mode 100644
index 0000000000..a0ff7a5d27
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template
@@ -0,0 +1,44 @@
+# Set HdfsProxy-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use. Required.
+# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+
+# Extra Java CLASSPATH elements. Optional.
+# export HDFSPROXY_CLASSPATH=
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HDFSPROXY_HEAPSIZE=2000
+
+# Extra Java runtime options. Empty by default.
+# export HDFSPROXY_OPTS=
+
+# Extra ssh options. Empty by default.
+# export HDFSPROXY_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HDFSPROXY_CONF_DIR"
+
+# Where log files are stored. $HDFSPROXY_HOME/logs by default.
+# export HDFSPROXY_LOG_DIR=${HDFSPROXY_HOME}/logs
+
+# File naming remote slave hosts. $HDFSPROXY_HOME/conf/slaves by default.
+# export HDFSPROXY_SLAVES=${HDFSPROXY_HOME}/conf/slaves
+
+# host:path where hdfsproxy code should be rsync'd from. Unset by default.
+# export HDFSPROXY_MASTER=master:/home/$USER/src/hdfsproxy
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HDFSPROXY_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+# export HDFSPROXY_PID_DIR=/var/hdfsproxy/pids
+
+# A string representing this instance of hdfsproxy. $USER by default.
+# export HDFSPROXY_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HDFSPROXY_NICENESS=10
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-hosts b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-hosts
new file mode 100644
index 0000000000..2fbb50c4a8
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/hdfsproxy-hosts
@@ -0,0 +1 @@
+localhost
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/log4j.properties b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/log4j.properties
new file mode 100644
index 0000000000..2520ab3795
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/log4j.properties
@@ -0,0 +1,61 @@
+# Define some default values that can be overridden by system properties
+hdfsproxy.root.logger=INFO,console
+hdfsproxy.log.dir=.
+hdfsproxy.log.file=hdfsproxy.log
+
+# Define the root logger to the system property "hdfsproxy.root.logger".
+log4j.rootLogger=${hdfsproxy.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hdfsproxy.log.dir}/${hdfsproxy.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hdfsproxy.log.dir}/${hdfsproxy.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.hdfsproxy.HttpsProxy=DEBUG
+#log4j.logger.org.apache.hadoop.hdfsproxy.ProxyFilter=DEBUG
+
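The root logger above is chosen through the hdfsproxy.root.logger system property, so the same file can drive either the console appender or the daily rolling file appender. A minimal sketch, assuming the property is passed as a -D option on the java command line (as Hadoop's launch scripts do):

    import org.apache.log4j.Logger;

    public class LoggingExample {
        private static final Logger LOG = Logger.getLogger(LoggingExample.class);

        public static void main(String[] args) {
            // Launched e.g. as:
            //   java -Dhdfsproxy.root.logger=DEBUG,console -Dhdfsproxy.log.dir=/tmp LoggingExample
            // log4j expands the ${...} references in the properties file above.
            LOG.info("proxy started");
            LOG.debug("visible only when the root logger level is DEBUG");
        }
    }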
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-certs.xml b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-certs.xml
new file mode 100644
index 0000000000..f572a55294
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-certs.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+
+This file defines the mappings from username to a comma-separated list
+of certificate serial numbers that the user is allowed to use. One mapping
+per user. Wildcard characters, such as "*" and "?", are not recognized.
+Any leading or trailing whitespace is stripped/ignored. Note that user
+"Admin" is the special hdfsproxy admin user. To make a user an admin, add
+the user's certificate serial number to user "Admin". Normal users cannot
+have "Admin" as their username. Usernames may consist only of 0-9a-zA-Z and
+underscore.
+
+-->
+
+<configuration>
+
+<property>
+ <name>Admin</name>
+ <value></value>
+ <description> Special hdfsproxy admin user
+ </description>
+</property>
+
+</configuration>
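Given the format described above (one property per user, a comma-separated list of serial numbers, surrounding whitespace ignored), parsing reduces to a split-and-trim. A sketch under those assumptions; the class and helper names are hypothetical, not the actual hdfsproxy implementation:

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.conf.Configuration;

    public class UserCertsExample {
        // Hypothetical helper: the certificate serial numbers listed for a user.
        static Set<String> serialsFor(Configuration certs, String user) {
            Set<String> out = new HashSet<String>();
            for (String s : certs.get(user, "").split(",")) {
                String trimmed = s.trim();  // leading/trailing whitespace is ignored
                if (trimmed.length() > 0) {
                    out.add(trimmed);
                }
            }
            return out;
        }

        public static void main(String[] args) {
            Configuration certs = new Configuration();
            certs.addResource("user-certs.xml");
            System.out.println("Admin certs: " + serialsFor(certs, "Admin"));
        }
    }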
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-permissions.xml b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-permissions.xml
new file mode 100644
index 0000000000..b7373751bd
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/conf/user-permissions.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+
+This file defines the mappings from username to a comma-separated list
+of directories/files that the user is allowed to use. One mapping
+per user. Wildcard characters, such as "*" and "?", are not recognized.
+For example, to match the "/output" directory, one can use "/output" or
+"/output/", but not "/output/*". Any leading or trailing whitespace
+in the name field is stripped/ignored, while only leading whitespace
+in the value field is. Note that the special hdfsproxy admin user "Admin"
+doesn't automatically have access to any files, unless explicitly
+specified in this file. Usernames may consist only of 0-9a-zA-Z and
+underscore.
+
+-->
+
+<configuration>
+
+<property>
+ <name></name>
+ <value></value>
+ <description>
+ </description>
+</property>
+
+</configuration>
diff --git a/lib/hadoop-0.20.0/contrib/hdfsproxy/hdfsproxy-1.0.jar b/lib/hadoop-0.20.0/contrib/hdfsproxy/hdfsproxy-1.0.jar
new file mode 100644
index 0000000000..a313391dfb
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/hdfsproxy/hdfsproxy-1.0.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/contrib/index/hadoop-0.20.0-index.jar b/lib/hadoop-0.20.0/contrib/index/hadoop-0.20.0-index.jar
new file mode 100644
index 0000000000..f1f850fcd3
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/index/hadoop-0.20.0-index.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/contrib/streaming/hadoop-0.20.0-streaming.jar b/lib/hadoop-0.20.0/contrib/streaming/hadoop-0.20.0-streaming.jar
new file mode 100644
index 0000000000..84251e3a3c
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/streaming/hadoop-0.20.0-streaming.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/contrib/thriftfs/hadoop-0.20.0-thriftfs.jar b/lib/hadoop-0.20.0/contrib/thriftfs/hadoop-0.20.0-thriftfs.jar
new file mode 100644
index 0000000000..bf10c05e1d
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/thriftfs/hadoop-0.20.0-thriftfs.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/contrib/vaidya/bin/vaidya.sh b/lib/hadoop-0.20.0/contrib/vaidya/bin/vaidya.sh
new file mode 100755
index 0000000000..ada6715342
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/vaidya/bin/vaidya.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+this="$0"
+while [ -h "$this" ]; do
+ ls=`ls -ld "$this"`
+ link=`expr "$ls" : '.*-> \(.*\)$'`
+ if expr "$link" : '.*/.*' > /dev/null; then
+ this="$link"
+ else
+ this=`dirname "$this"`/"$link"
+ fi
+done
+
+# convert relative path to absolute path
+bin=`dirname "$this"`
+script=`basename "$this"`
+bin=`cd "$bin"; pwd`
+this="$bin/$script"
+
+# Check if HADOOP_HOME and JAVA_HOME are set.
+if [ -z "$HADOOP_HOME" ] ; then
+ echo "HADOOP_HOME environment variable not defined"
+ exit 1;
+fi
+
+if [ -z "$JAVA_HOME" ] ; then
+ echo "JAVA_HOME environment variable not defined"
+ exit 1;
+fi
+
+hadoopVersion=`$HADOOP_HOME/bin/hadoop version | awk 'BEGIN { RS = "" ; FS = "\n" } ; { print $1 }' | awk '{print $2}'`
+
+"$JAVA_HOME"/bin/java -classpath $HADOOP_HOME/hadoop-${hadoopVersion}-core.jar:$HADOOP_HOME/contrib/vaidya/hadoop-${hadoopVersion}-vaidya.jar:$HADOOP_HOME/lib/commons-logging-1.0.4.jar:${CLASSPATH} org.apache.hadoop.vaidya.postexdiagnosis.PostExPerformanceDiagnoser "$@"
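The awk pipeline above takes the first line of "hadoop version" output and keeps its second whitespace-separated field. The same extraction in Java, with a hypothetical sample of the command's first output line:

    public class VersionParseExample {
        public static void main(String[] args) {
            // First line of "hadoop version" output (hypothetical sample).
            String firstLine = "Hadoop 0.20.0";
            // awk '{print $2}' equivalent: the second whitespace-separated field.
            String version = firstLine.split("\\s+")[1];
            System.out.println(version);  // prints 0.20.0
        }
    }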
diff --git a/lib/hadoop-0.20.0/contrib/vaidya/conf/postex_diagnosis_tests.xml b/lib/hadoop-0.20.0/contrib/vaidya/conf/postex_diagnosis_tests.xml
new file mode 100644
index 0000000000..f30d5d9cc8
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/vaidya/conf/postex_diagnosis_tests.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="ISO-8859-1"?>
+<!--
+**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+**
+ -->
+<!-- This is a diagnostic test configuration file. The diagnostic test driver
+ reads this file to get the list of tests and their configuration information.
+
+ Title : Provides a brief description of the test
+ ClassName : Provides the fully qualified java class name that implements the test condition
+ Description : Provides detailed information about the test, describing how it checks for a specific
+ performance problem.
+ SuccessThreshold : (value between [0..1])
+ : Evaluation of a diagnostic test returns its level of impact on the job
+ performance. If the impact value [between 0..1] is equal to or greater than the
+ success threshold, the rule has detected the problem (TEST POSITIVE); otherwise
+ the rule has passed the test (TEST NEGATIVE). The impact level is calculated and
+ returned by each test's evaluate method. For tests that are boolean in nature
+ the impact level is either 0 or 1 and the success threshold should be 1.
+ Importance : Indicates the relative importance of this diagnostic test among the set of
+ diagnostic rules defined in this file. The three values that
+ can be assigned are High, Medium or Low.
+ Prescription : This is an optional element storing the advice to be included in the report upon test failure.
+ It is overwritten in the report by any advice/prescription text returned by the getPrescription method of
+ DiagnosticTest.
+ InputElement : The input element is made available to the diagnostic test for it to interpret and accept
+ any parameters specific to the test. These test-specific parameters are used to configure
+ the tests without changing the java code.
+-->
+<PostExPerformanceDiagnosisTests>
+
+<DiagnosticTest>
+ <Title><![CDATA[Balanced Reduce Partitioning]]></Title>
+ <ClassName><![CDATA[org.apache.hadoop.vaidya.postexdiagnosis.tests.BalancedReducePartitioning]]></ClassName>
+ <Description><![CDATA[This rule tests how well the input to reduce tasks is balanced]]></Description>
+ <Importance><![CDATA[High]]></Importance>
+ <SuccessThreshold><![CDATA[0.20]]></SuccessThreshold>
+ <Prescription><![CDATA[advice]]></Prescription>
+ <InputElement>
+ <PercentReduceRecords><![CDATA[0.85]]></PercentReduceRecords>
+ </InputElement>
+</DiagnosticTest>
+
+<DiagnosticTest>
+ <Title><![CDATA[Impact of Map tasks Re-Execution]]></Title>
+ <ClassName><![CDATA[org.apache.hadoop.vaidya.postexdiagnosis.tests.MapsReExecutionImpact]]></ClassName>
+ <Description><![CDATA[This test rule checks percentage of map task re-execution impacting the job performance]]></Description>
+ <Importance><![CDATA[High]]></Importance>
+ <SuccessThreshold><![CDATA[0.40]]></SuccessThreshold>
+ <Prescription><![CDATA[default advice]]></Prescription>
+ <InputElement>
+ </InputElement>
+</DiagnosticTest>
+
+<DiagnosticTest>
+ <Title><![CDATA[Impact of Reduce tasks Re-Execution]]></Title>
+ <ClassName><![CDATA[org.apache.hadoop.vaidya.postexdiagnosis.tests.ReducesReExecutionImpact]]></ClassName>
+ <Description><![CDATA[This test rule checks percentage of reduce task re-execution impacting the job performance]]></Description>
+ <Importance><![CDATA[High]]></Importance>
+ <SuccessThreshold><![CDATA[0.40]]></SuccessThreshold>
+ <Prescription><![CDATA[default advice]]></Prescription>
+ <InputElement>
+ </InputElement>
+</DiagnosticTest>
+
+<DiagnosticTest>
+ <Title><![CDATA[Map and/or Reduce tasks reading HDFS data as a side effect]]></Title>
+ <ClassName><![CDATA[org.apache.hadoop.vaidya.postexdiagnosis.tests.ReadingHDFSFilesAsSideEffect]]></ClassName>
+ <Description><![CDATA[This test rule checks if map/reduce tasks are reading data from HDFS as a side effect. The more data read as a side effect, the more likely it is to become a bottleneck across the parallel execution of map/reduce tasks.]]></Description>
+ <Importance><![CDATA[High]]></Importance>
+ <SuccessThreshold><![CDATA[0.05]]></SuccessThreshold>
+ <Prescription><![CDATA[default advice]]></Prescription>
+ <InputElement>
+ </InputElement>
+</DiagnosticTest>
+
+<DiagnosticTest>
+ <Title><![CDATA[Map side disk spill]]></Title>
+ <ClassName><![CDATA[org.apache.hadoop.vaidya.postexdiagnosis.tests.MapSideDiskSpill]]></ClassName>
+ <Description><![CDATA[This test rule checks if Map tasks are spilling data onto the local disk during map-side sorting due to insufficient sort buffer size. The impact is calculated as the ratio of local bytes written to map output bytes. The impact is normalized using the NormalizationFactor given below, and any value greater than or equal to the normalization factor is treated as the maximum (i.e. 1).]]></Description>
+ <Importance><![CDATA[Low]]></Importance>
+ <SuccessThreshold><![CDATA[0.3]]></SuccessThreshold>
+ <Prescription><![CDATA[default advice]]></Prescription>
+ <InputElement>
+ <NormalizationFactor>3.0</NormalizationFactor>
+ </InputElement>
+</DiagnosticTest>
+
+</PostExPerformanceDiagnosisTests>
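The SuccessThreshold semantics in the header comment reduce to a single comparison between the impact returned by a test's evaluate method and the configured threshold. A sketch of that rule logic with hypothetical names (the real tests live under org.apache.hadoop.vaidya.postexdiagnosis.tests):

    public class ThresholdExample {
        // Hypothetical stand-in for comparing a test's impact to its threshold.
        static boolean testPositive(double impact, double successThreshold) {
            // Both values are in [0..1]; impact >= threshold means the rule
            // detected the problem (TEST POSITIVE), otherwise it passed.
            return impact >= successThreshold;
        }

        public static void main(String[] args) {
            // Balanced Reduce Partitioning above uses a threshold of 0.20.
            System.out.println(testPositive(0.35, 0.20));  // true  -> problem detected
            System.out.println(testPositive(0.10, 0.20));  // false -> test passed
        }
    }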
diff --git a/lib/hadoop-0.20.0/contrib/vaidya/hadoop-0.20.0-vaidya.jar b/lib/hadoop-0.20.0/contrib/vaidya/hadoop-0.20.0-vaidya.jar
new file mode 100644
index 0000000000..534b18d974
--- /dev/null
+++ b/lib/hadoop-0.20.0/contrib/vaidya/hadoop-0.20.0-vaidya.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/hadoop-0.20.0-ant.jar b/lib/hadoop-0.20.0/hadoop-0.20.0-ant.jar
new file mode 100644
index 0000000000..0c2b74e74f
--- /dev/null
+++ b/lib/hadoop-0.20.0/hadoop-0.20.0-ant.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/hadoop-0.20.0-core.jar b/lib/hadoop-0.20.0/hadoop-0.20.0-core.jar
new file mode 100644
index 0000000000..c99ce6d44d
--- /dev/null
+++ b/lib/hadoop-0.20.0/hadoop-0.20.0-core.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/hadoop-0.20.0-examples.jar b/lib/hadoop-0.20.0/hadoop-0.20.0-examples.jar
new file mode 100644
index 0000000000..23b88f885d
--- /dev/null
+++ b/lib/hadoop-0.20.0/hadoop-0.20.0-examples.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/hadoop-0.20.0-test.jar b/lib/hadoop-0.20.0/hadoop-0.20.0-test.jar
new file mode 100644
index 0000000000..02b17d4160
--- /dev/null
+++ b/lib/hadoop-0.20.0/hadoop-0.20.0-test.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/hadoop-0.20.0-tools.jar b/lib/hadoop-0.20.0/hadoop-0.20.0-tools.jar
new file mode 100644
index 0000000000..60f5e600a6
--- /dev/null
+++ b/lib/hadoop-0.20.0/hadoop-0.20.0-tools.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/ivy.xml b/lib/hadoop-0.20.0/ivy.xml
new file mode 100644
index 0000000000..051ac6efb0
--- /dev/null
+++ b/lib/hadoop-0.20.0/ivy.xml
@@ -0,0 +1,261 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<ivy-module version="1.0">
+ <info organisation="org.apache.hadoop" module="${ant.project.name}">
+ <license name="Apache 2.0"/>
+ <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
+ <description>
+ Hadoop Core
+ </description>
+ </info>
+ <configurations defaultconfmapping="default">
+ <!--these match the Maven configurations-->
+ <conf name="default" extends="master,runtime"/>
+ <conf name="master" description="contains the artifact but no dependencies"/>
+ <conf name="runtime" description="runtime but not the artifact"
+ extends="client,server,s3-server,kfs"/>
+
+ <conf name="mandatory" description="contains the critical dependencies"
+ extends="commons-logging,log4j"/>
+
+ <!--
+ These public configurations contain the core dependencies for running hadoop client or server.
+ The server is effectively a superset of the client.
+ -->
+ <conf name="client" description="client-side dependencies"
+ extends="mandatory,httpclient"/>
+ <conf name="server" description="server-side dependencies"
+ extends="client"/>
+ <conf name="s3-client" description="dependencies for working with S3/EC2 infrastructure"
+ extends="client"/>
+ <conf name="s3-server" description="dependencies for running on S3/EC2 infrastructure"
+ extends="s3-client,server"/>
+ <conf name="kfs" description="dependencies for KFS file system support"/>
+ <conf name="ftp" description="dependencies for workign with FTP filesytems"
+ extends="mandatory"/>
+ <conf name="jetty" description="Jetty provides the in-VM HTTP daemon" extends="commons-logging"/>
+
+ <!--Private configurations. -->
+
+ <conf name="common" visibility="private" extends="runtime,mandatory,httpclient,ftp,jetty"
+ description="common artifacts"/>
+ <conf name="javadoc" visibility="private" description="artiracts required while performing doc generation"
+ extends="common,mandatory,jetty,lucene"/>
+ <!--Testing pulls in everything-->
+ <conf name="test" extends="common,default,s3-server,kfs" visibility="private"
+ description="the classpath needed to run tests"/>
+ <conf name="releaseaudit" visibility="private"
+ description="Artifacts required for releaseaudit target"/>
+
+ <conf name="commons-logging" visibility="private"/>
+ <conf name="httpclient" visibility="private" extends="commons-logging"/>
+ <conf name="log4j" visibility="private"/>
+ <conf name="lucene" visibility="private"/>
+ <conf name="jdiff" visibility="private" extends="log4j,s3-client,jetty,server"/>
+ <conf name="checkstyle" visibility="private"/>
+
+ </configurations>
+
+ <publications>
+ <!--get the artifact from our module name-->
+ <artifact conf="master"/>
+ </publications>
+ <dependencies>
+
+ <!--used client side-->
+<!-- <dependency org="commons-cli"
+ name="commons-cli"
+ rev="${commons-cli.version}"
+ conf="client->default"/> -->
+
+ <dependency org="checkstyle"
+ name="checkstyle"
+ rev="${checkstyle.version}"
+ conf="checkstyle->default"/>
+ <dependency org="jdiff"
+ name="jdiff"
+ rev="${jdiff.version}"
+ conf="jdiff->default"/>
+ <dependency org="xerces"
+ name="xerces"
+ rev="${xerces.version}"
+ conf="jdiff->default">
+ </dependency>
+
+ <dependency org="xmlenc"
+ name="xmlenc"
+ rev="${xmlenc.version}"
+ conf="server->default"/>
+
+ <!--Configuration: httpclient-->
+
+ <!--
+ commons-httpclient asks for too many files.
+ All it needs is commons-codec and commons-logging JARs
+ -->
+ <dependency org="commons-httpclient"
+ name="commons-httpclient"
+ rev="${commons-httpclient.version}"
+ conf="httpclient->master">
+ </dependency>
+
+ <dependency org="commons-codec"
+ name="commons-codec"
+ rev="${commons-codec.version}"
+ conf="httpclient->default"/>
+
+ <dependency org="commons-net"
+ name="commons-net"
+ rev="${commons-net.version}"
+ conf="ftp->default"/>
+
+ <!--Configuration: Jetty -->
+
+<!-- <dependency org="javax.servlet"
+ name="servlet-api"
+ rev="${servlet-api.version}"
+ conf="jetty->master"/> -->
+ <dependency org="org.mortbay.jetty"
+ name="jetty"
+ rev="${jetty.version}"
+ conf="jetty->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="jetty-util"
+ rev="${jetty-util.version}"
+ conf="jetty->master"/>
+
+ <dependency org="tomcat"
+ name="jasper-runtime"
+ rev="${jasper.version}"
+ conf="jetty->master"/>
+ <dependency org="tomcat"
+ name="jasper-compiler"
+ rev="${jasper.version}"
+ conf="jetty->master"/>
+<!-- this is resolved locally from the lib folder
+ <dependency org="tomcat"
+ name="jsp-api"
+ rev="${jsp-api.version}"
+ conf="jetty->master"/> -->
+ <dependency org="commons-el"
+ name="commons-el"
+ rev="${commons-el.version}"
+ conf="jetty->master"/>
+
+
+ <!--Configuration: commons-logging -->
+
+ <!--it is essential that only the master JAR of commons logging
+ is pulled in, as its dependencies are usually a mess, including things
+ like out of date servlet APIs, bits of Avalon, etc.
+ -->
+ <dependency org="commons-logging"
+ name="commons-logging"
+ rev="${commons-logging.version}"
+ conf="commons-logging->master"/>
+
+
+ <!--Configuration: log4j -->
+
+ <!--log4J is not optional until commons-logging.properties is stripped out of the JAR -->
+ <dependency org="log4j"
+ name="log4j"
+ rev="${log4j.version}"
+ conf="log4j->master"/>
+
+ <!--Configuration: s3-client -->
+ <!--there are two jets3t projects in the repository; this one goes up to 0.6 and
+ is assumed to be the live one-->
+ <dependency org="net.java.dev.jets3t"
+ name="jets3t"
+ rev="${jets3t.version}"
+ conf="s3-client->master"/>
+ <dependency org="commons-net"
+ name="commons-net"
+ rev="${commons-net.version}"
+ conf="s3-client->master"/>
+ <dependency org="org.mortbay.jetty"
+ name="servlet-api-2.5"
+ rev="${servlet-api-2.5.version}"
+ conf="s3-client->master"/>
+
+ <!--Configuration: kfs -->
+
+ <!-- This is not in the repository
+ <dependency org="org.kosmix"
+ name="kfs"
+ rev="${kfs.version}"
+ conf="kfs->default"/>-->
+
+ <!--Configuration: test -->
+
+ <!--artifacts needed for testing -->
+ <dependency org="junit"
+ name="junit"
+ rev="${junit.version}"
+ conf="common->default"/>
+ <dependency org="com.google.code.p.arat"
+ name="rat-lib"
+ rev="${rats-lib.version}"
+ conf="releaseaudit->default"/>
+ <dependency org="commons-lang"
+ name="commons-lang"
+ rev="${commons-lang.version}"
+ conf="releaseaudit->default"/>
+ <dependency org="commons-collections"
+ name="commons-collections"
+ rev="${commons-collections.version}"
+ conf="releaseaudit->default"/>
+<!--<dependency org="hsqldb"
+ name="hsqldb"
+ rev="${hsqldb.version}"
+ conf="common->default"/>
+ <dependency org="lucene"
+ name="lucene"
+ rev="${lucene.version}"
+ conf="javadoc->default"/> -->
+ <dependency org="org.apache.lucene"
+ name="lucene-core"
+ rev="${lucene-core.version}"
+ conf="javadoc->default"/>
+ <dependency org="commons-logging"
+ name="commons-logging-api"
+ rev="${commons-logging-api.version}"
+ conf="common->default"/>
+ <dependency org="org.slf4j"
+ name="slf4j-api"
+ rev="${slf4j-api.version}"
+ conf="common->master"/>
+ <dependency org="org.eclipse.jdt"
+ name="core"
+ rev="${core.version}"
+ conf="common->master"/>
+ <dependency org="oro"
+ name="oro"
+ rev="${oro.version}"
+ conf="common->default"/>
+ <dependency org="org.slf4j"
+ name="slf4j-log4j12"
+ rev="${slf4j-log4j12.version}"
+ conf="common->master">
+ </dependency>
+ </dependencies>
+
+</ivy-module>
diff --git a/lib/hadoop-0.20.0/ivy/hadoop-core.pom b/lib/hadoop-0.20.0/ivy/hadoop-core.pom
new file mode 100644
index 0000000000..ffdd18951e
--- /dev/null
+++ b/lib/hadoop-0.20.0/ivy/hadoop-core.pom
@@ -0,0 +1,257 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+ <!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-core</artifactId>
+ <packaging>jar</packaging>
+ <version>${hadoop.version}</version>
+ <description>
+ Hadoop is the distributed computing framework of Apache; hadoop-core contains
+ the filesystem, job tracker and map/reduce modules
+ </description>
+ <licenses>
+ <license>
+ <name>Apache License, Version 2.0</name>
+ <url>http://apache.org/licenses/LICENSE-2.0</url>
+ </license>
+ </licenses>
+ <dependencies>
+
+
+ <!-- always include commons-logging and log4J -->
+ <dependency>
+ <groupId>commons-logging</groupId>
+ <artifactId>commons-logging</artifactId>
+ <version>${commons-logging.version}</version>
+ <exclusions>
+ <exclusion>
+ <groupId>avalon-framework</groupId>
+ <artifactId>avalon-framework</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>javax.servlet</groupId>
+ <artifactId>servlet-api</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>logkit</groupId>
+ <artifactId>logkit</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ <version>${log4j.version}</version>
+ <scope>optional</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>javax.mail</groupId>
+ <artifactId>mail</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>javax.jms</groupId>
+ <artifactId>jms</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jdmk</groupId>
+ <artifactId>jmxtools</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jmx</groupId>
+ <artifactId>jmxri</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <!--SLF4J is a JAR-based dependency; this POM binds it to log4J-->
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <version>${slf4j-api.version}</version>
+ <scope>optional</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ <version>${slf4j-log4j12.version}</version>
+ <scope>optional</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <!--Httpclient and its components are optional-->
+
+ <dependency>
+ <groupId>commons-httpclient</groupId>
+ <artifactId>commons-httpclient</artifactId>
+ <version>3.1</version>
+ <scope>optional</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>commons-logging</groupId>
+ <artifactId>commons-logging</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>commons-codec</groupId>
+ <artifactId>commons-codec</artifactId>
+ <version>1.3</version>
+ <scope>optional</scope>
+ </dependency>
+
+ <!--CLI is needed to scan the command line, but only the 1.0 branch is released -->
+ <dependency>
+ <groupId>commons-cli</groupId>
+ <artifactId>commons-cli</artifactId>
+ <version>2.0-20070823</version>
+ <scope>optional</scope>
+ </dependency>
+
+
+ <!-- this is used for the ftp:// filesystem-->
+ <dependency>
+ <groupId>commons-net</groupId>
+ <artifactId>commons-net</artifactId>
+ <version>1.4.1</version>
+ <scope>optional</scope>
+ </dependency>
+
+ <!-- Jetty is used to serve up the application. It is marked as optional because
+ clients do not need it. All server-side deployments will need
+ all of these files.-->
+ <dependency>
+ <groupId>javax.servlet</groupId>
+ <artifactId>servlet-api</artifactId>
+ <version>${servlet-api.version}</version>
+ <scope>optional</scope>
+ </dependency>
+ <dependency>
+ <groupId>jetty</groupId>
+ <artifactId>org.mortbay.jetty</artifactId>
+ <version>${jetty.version}</version>
+ <scope>optional</scope>
+ </dependency>
+
+
+ <!--JSP support -->
+
+ <dependency>
+ <groupId>org.mortbay.jetty</groupId>
+ <artifactId>jsp-2.1</artifactId>
+ <version>${jetty.version}</version>
+ <scope>optional</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.mortbay.jetty</groupId>
+ <artifactId>jsp-api-2.1</artifactId>
+ <version>${jetty.version}</version>
+ <scope>optional</scope>
+ </dependency>
+ <dependency>
+ <groupId>commons-el</groupId>
+ <artifactId>commons-el</artifactId>
+ <version>${commons-el.version}</version>
+ <scope>optional</scope>
+ </dependency>
+
+
+ <!--JSPC assistance-->
+
+ <dependency>
+ <groupId>org.eclipse.jdt</groupId>
+ <artifactId>core</artifactId>
+ <version>${core.version}</version>
+ <scope>optional</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.ant</groupId>
+ <artifactId>ant</artifactId>
+ <version>${apacheant.version}</version>
+ <scope>optional</scope>
+ </dependency>
+
+ <!-- JetS3t is a client library for S3.
+ -It is only needed if you want to work with S3 filesystems
+ -It pulls in commons-logging 1.1.1 and does not exclude all the cruft that comes with it.
+ By excluding it we stay in control of versions and dependencies
+ -->
+
+ <dependency>
+ <groupId>net.java.dev.jets3t</groupId>
+ <artifactId>jets3t</artifactId>
+ <version>${jets3t.version}</version>
+ <scope>optional</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>commons-logging</groupId>
+ <artifactId>commons-logging</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <!--Kosmos filesystem
+ http://kosmosfs.sourceforge.net/
+ This is not in the central repository
+ -->
+ <!--
+ <dependency>
+ <groupId>org.kosmix</groupId>
+ <artifactId>kfs</artifactId>
+ <version>0.1</version>
+ <scope>optional</scope>
+ </dependency>
+ -->
+
+ <!--
+ http://xmlenc.sourceforge.net/
+ "The xmlenc library is a fast stream-based XML output library for Java."
+ -->
+ <dependency>
+ <groupId>xmlenc</groupId>
+ <artifactId>xmlenc</artifactId>
+ <version>0.52</version>
+ <scope>optional</scope>
+ </dependency>
+ </dependencies>
+</project>
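The POM binds the SLF4J API to log4j through the optional slf4j-log4j12 artifact, so code written against SLF4J lands in the same log4j appenders configured elsewhere in this patch. A minimal sketch of that usage (plain SLF4J API, nothing Hadoop-specific):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jExample {
        private static final Logger LOG = LoggerFactory.getLogger(Slf4jExample.class);

        public static void main(String[] args) {
            // With slf4j-log4j12 on the classpath this call is routed to log4j.
            LOG.info("routed through slf4j-log4j12 to log4j");
        }
    }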
diff --git a/lib/hadoop-0.20.0/ivy/ivy-2.0.0-rc2.jar b/lib/hadoop-0.20.0/ivy/ivy-2.0.0-rc2.jar
new file mode 100644
index 0000000000..fa9ef21c7b
--- /dev/null
+++ b/lib/hadoop-0.20.0/ivy/ivy-2.0.0-rc2.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/ivy/ivysettings.xml b/lib/hadoop-0.20.0/ivy/ivysettings.xml
new file mode 100644
index 0000000000..a7fcd22031
--- /dev/null
+++ b/lib/hadoop-0.20.0/ivy/ivysettings.xml
@@ -0,0 +1,81 @@
+<ivysettings>
+
+ <!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+ <!--
+ see http://www.jayasoft.org/ivy/doc/configuration
+ -->
+ <!-- you can override this property to use mirrors
+ http://repo1.maven.org/maven2/
+ http://mirrors.dotsrc.org/maven2
+ http://ftp.ggi-project.org/pub/packages/maven2
+ http://mirrors.sunsite.dk/maven2
+ http://public.planetmirror.com/pub/maven2
+ http://ibiblio.lsu.edu/main/pub/packages/maven2
+ http://www.ibiblio.net/pub/packages/maven2
+ -->
+ <property name="repo.maven.org"
+ value="http://repo1.maven.org/maven2/"
+ override="false"/>
+ <property name="snapshot.apache.org"
+ value="http://people.apache.org/repo/m2-snapshot-repository/"
+ override="false"/>
+ <property name="maven2.pattern"
+ value="[organisation]/[module]/[revision]/[module]-[revision]"/>
+ <property name="maven2.pattern.ext"
+ value="${maven2.pattern}.[ext]"/>
+ <!-- pull in the local repository -->
+ <include url="${ivy.default.conf.dir}/ivyconf-local.xml"/>
+ <settings defaultResolver="default"/>
+ <resolvers>
+ <ibiblio name="maven2"
+ root="${repo.maven.org}"
+ pattern="${maven2.pattern.ext}"
+ m2compatible="true"
+ />
+ <ibiblio name="apache-snapshot"
+ root="${snapshot.apache.org}"
+ pattern="${maven2.pattern.ext}"
+ m2compatible="true"
+ />
+ <chain name="default" dual="true">
+ <resolver ref="local"/>
+ <resolver ref="maven2"/>
+ </chain>
+ <chain name="internal">
+ <resolver ref="local"/>
+ </chain>
+ <chain name="external">
+ <resolver ref="maven2"/>
+ </chain>
+ <chain name="external-and-snapshots">
+ <resolver ref="maven2"/>
+ <resolver ref="apache-snapshot"/>
+ </chain>
+ </resolvers>
+ <modules>
+ <!--
+ This forces a requirement for other hadoop artifacts to be built locally
+ rather than looked for online.
+
+ -->
+ <module organisation="org.apache.hadoop" name=".*" resolver="internal"/>
+ <!--until commons cli is external, we need to pull it in from the snapshot repository - if present -->
+ <module organisation="org.apache.commons" name=".*" resolver="external-and-snapshots"/>
+ </modules>
+</ivysettings>
diff --git a/lib/hadoop-0.20.0/ivy/libraries.properties b/lib/hadoop-0.20.0/ivy/libraries.properties
new file mode 100644
index 0000000000..17cf390d92
--- /dev/null
+++ b/lib/hadoop-0.20.0/ivy/libraries.properties
@@ -0,0 +1,71 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#This properties file lists the versions of the various artifacts used by hadoop and components.
+#It drives ivy and the generation of a maven POM
+
+# This is the version of hadoop we are generating
+hadoop.version=0.20.0
+
+#These are the versions of our dependencies (in alphabetical order)
+apacheant.version=1.7.0
+
+checkstyle.version=4.2
+
+commons-cli.version=2.0-SNAPSHOT
+commons-codec.version=1.3
+commons-collections.version=3.1
+commons-httpclient.version=3.0.1
+commons-lang.version=2.4
+commons-logging.version=1.0.4
+commons-logging-api.version=1.0.4
+commons-el.version=1.0
+commons-fileupload.version=1.2
+commons-io.version=1.4
+commons-net.version=1.4.1
+core.version=3.1.1
+coreplugin.version=1.3.2
+
+hsqldb.version=1.8.0.10
+
+#ivy.version=2.0.0-beta2
+ivy.version=2.0.0-rc2
+
+jasper.version=5.5.12
+#not able to figure out the jsp & jsp-api versions to get them resolved through ivy,
+# but still declared here as we are going to have a local copy from the lib folder
+jsp.version=2.1
+jsp-api.version=5.5.12
+jets3t.version=0.6.1
+jetty.version=6.1.14
+jetty-util.version=6.1.14
+junit.version=3.8.1
+jdiff.version=1.0.9
+json.version=1.0
+
+kfs.version=0.1
+
+log4j.version=1.2.15
+lucene-core.version=2.3.1
+
+oro.version=2.0.8
+
+rats-lib.version=0.5.1
+
+servlet.version=4.0.6
+servlet-api-2.5.version=6.1.14
+servlet-api.version=2.5
+slf4j-api.version=1.4.3
+slf4j-log4j12.version=1.4.3
+
+xmlenc.version=0.52
+xerces.version=1.4.4
diff --git a/lib/hadoop-0.20.0/lib/.DS_Store b/lib/hadoop-0.20.0/lib/.DS_Store
new file mode 100644
index 0000000000..e0d363a012
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/.DS_Store
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/commons-cli-2.0-SNAPSHOT.jar b/lib/hadoop-0.20.0/lib/commons-cli-2.0-SNAPSHOT.jar
new file mode 100644
index 0000000000..0b1d51072a
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/commons-cli-2.0-SNAPSHOT.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/commons-codec-1.3.jar b/lib/hadoop-0.20.0/lib/commons-codec-1.3.jar
new file mode 100644
index 0000000000..957b6752af
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/commons-codec-1.3.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/commons-el-1.0.jar b/lib/hadoop-0.20.0/lib/commons-el-1.0.jar
new file mode 100644
index 0000000000..608ed796ca
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/commons-el-1.0.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/commons-httpclient-3.0.1.jar b/lib/hadoop-0.20.0/lib/commons-httpclient-3.0.1.jar
new file mode 100644
index 0000000000..cfc777c71d
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/commons-httpclient-3.0.1.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/commons-logging-1.0.4.jar b/lib/hadoop-0.20.0/lib/commons-logging-1.0.4.jar
new file mode 100644
index 0000000000..b73a80fab6
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/commons-logging-1.0.4.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/commons-logging-api-1.0.4.jar b/lib/hadoop-0.20.0/lib/commons-logging-api-1.0.4.jar
new file mode 100644
index 0000000000..ade9a13c78
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/commons-logging-api-1.0.4.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/commons-net-1.4.1.jar b/lib/hadoop-0.20.0/lib/commons-net-1.4.1.jar
new file mode 100644
index 0000000000..9666a92c80
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/commons-net-1.4.1.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/core-3.1.1.jar b/lib/hadoop-0.20.0/lib/core-3.1.1.jar
new file mode 100644
index 0000000000..ae0b635867
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/core-3.1.1.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/hsqldb-1.8.0.10.LICENSE.txt b/lib/hadoop-0.20.0/lib/hsqldb-1.8.0.10.LICENSE.txt
new file mode 100644
index 0000000000..d45b9f8cc0
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/hsqldb-1.8.0.10.LICENSE.txt
@@ -0,0 +1,66 @@
+/* Copyright (c) 1995-2000, The Hypersonic SQL Group.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the Hypersonic SQL Group nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE HYPERSONIC SQL GROUP,
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This software consists of voluntary contributions made by many individuals
+ * on behalf of the Hypersonic SQL Group.
+ *
+ *
+ * For work added by the HSQL Development Group:
+ *
+ * Copyright (c) 2001-2004, The HSQL Development Group
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the HSQL Development Group nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG,
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
diff --git a/lib/hadoop-0.20.0/lib/hsqldb-1.8.0.10.jar b/lib/hadoop-0.20.0/lib/hsqldb-1.8.0.10.jar
new file mode 100644
index 0000000000..e010269ddf
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/hsqldb-1.8.0.10.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/jasper-compiler-5.5.12.jar b/lib/hadoop-0.20.0/lib/jasper-compiler-5.5.12.jar
new file mode 100644
index 0000000000..2a410b4b58
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jasper-compiler-5.5.12.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/jasper-runtime-5.5.12.jar b/lib/hadoop-0.20.0/lib/jasper-runtime-5.5.12.jar
new file mode 100644
index 0000000000..743d906c1f
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jasper-runtime-5.5.12.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.17.0.xml b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.17.0.xml
new file mode 100644
index 0000000000..69dded3140
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.17.0.xml
@@ -0,0 +1,43272 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Jun 10 22:31:49 UTC 2008 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="hadoop 0.17.0"
+ jdversion="1.1.0">
+
+<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/oom/tools/src/jdiff-1.1.0-src/jdiff.jar:/home/oom/tools/src/jdiff-1.1.0-src/lib/xerces.jar -classpath /home/oom/work/eclipse/hadoop-17/lib/commons-cli-2.0-SNAPSHOT.jar:/home/oom/work/eclipse/hadoop-17/lib/commons-codec-1.3.jar:/home/oom/work/eclipse/hadoop-17/lib/commons-httpclient-3.0.1.jar:/home/oom/work/eclipse/hadoop-17/lib/commons-logging-1.0.4.jar:/home/oom/work/eclipse/hadoop-17/lib/commons-logging-api-1.0.4.jar:/home/oom/work/eclipse/hadoop-17/lib/jets3t-0.5.0.jar:/home/oom/work/eclipse/hadoop-17/lib/jetty-5.1.4.jar:/home/oom/work/eclipse/hadoop-17/lib/jetty-ext/commons-el.jar:/home/oom/work/eclipse/hadoop-17/lib/jetty-ext/jasper-compiler.jar:/home/oom/work/eclipse/hadoop-17/lib/jetty-ext/jasper-runtime.jar:/home/oom/work/eclipse/hadoop-17/lib/jetty-ext/jsp-api.jar:/home/oom/work/eclipse/hadoop-17/lib/junit-3.8.1.jar:/home/oom/work/eclipse/hadoop-17/lib/kfs-0.1.jar:/home/oom/work/eclipse/hadoop-17/lib/log4j-1.2.13.jar:/home/oom/work/eclipse/hadoop-17/lib/servlet-api.jar:/home/oom/work/eclipse/hadoop-17/lib/xmlenc-0.52.jar:/home/oom/work/eclipse/hadoop-17/conf:/usr/releng/share/java/ant/1.6.5/lib/ant-launcher.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-antlr.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-bcel.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-bsf.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-log4j.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-oro.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-regexp.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-resolver.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-commons-logging.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-commons-net.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-icontract.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-jai.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-javamail.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-jdepend.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-jmf.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-jsch.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-junit.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-netrexx.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-nodeps.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-starteam.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-stylebook.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-swing.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-trax.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-vaj.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-weblogic.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-xalan1.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-xslp.jar:/usr/releng/share/java/ant/1.6.5/lib/ant.jar:/usr/releng/share/java/ant/1.6.5/lib/xercesImpl.jar:/usr/releng/share/java/ant/1.6.5/lib/xml-apis.jar:/usr/releng/share/java/ant/1.6.5/lib/junit-3.8.1.jar:/nfs/ystools/vol/ystools/releng/build/Linux_2.6_rh4_x86_64/tools/java/jdk1.6.0_i586/lib/tools.jar -sourcepath /home/oom/work/eclipse/hadoop-17/src/java -apidir /home/oom/work/eclipse/hadoop-17/build -apiname hadoop 0.17.0 -->
+<package name="org.apache.hadoop">
+ <!-- start class org.apache.hadoop.HadoopVersionAnnotation -->
+ <class name="HadoopVersionAnnotation" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.annotation.Annotation"/>
+ <doc>
+ <![CDATA[A package attribute that captures the version of Hadoop that was compiled.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.HadoopVersionAnnotation -->
+</package>
+<package name="org.apache.hadoop.conf">
+ <!-- start interface org.apache.hadoop.conf.Configurable -->
+ <interface name="Configurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration to be used by this object.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the configuration used by this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.conf.Configurable -->
+ <!-- start class org.apache.hadoop.conf.Configuration -->
+ <class name="Configuration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"/>
+ <constructor name="Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration with the same settings cloned from another.
+
+ @param other the configuration from which to clone settings.]]>
+ </doc>
+ </constructor>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param name resource to be added, the classpath is examined for a file
+ with that name.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="url" type="java.net.URL"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param url url of the resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param file file-path of resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, <code>null</code> if
+ no such property exists.
+
+ Values are processed for <a href="#VariableExpansion">variable expansion</a>
+ before being returned.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="getRaw" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, without doing
+ <a href="#VariableExpansion">variable expansion</a>.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the <code>value</code> of the <code>name</code> property.
+
+ @param name property name.
+ @param value property value.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property. If no such property
+ exists, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value, or <code>defaultValue</code> if the property
+ doesn't exist.]]>
+ </doc>
+ </method>
+ <method name="getInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="int"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>.
+
+ If no such property exists, or if the specified value is not a valid
+ <code>int</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as an <code>int</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>.
+
+ @param name property name.
+ @param value <code>int</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="long"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>long</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>long</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>.
+
+ @param name property name.
+ @param value <code>long</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="float"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>float</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>float</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getBoolean" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="boolean"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>boolean</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>boolean</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setBoolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>.
+
+ @param name property name.
+ @param value <code>boolean</code> value of the property.]]>
+ </doc>
+ </method>
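+ <!-- Usage sketch (editorial, not part of the generated API description):
+      reading and writing typed properties. The property names below are
+      hypothetical; the typed getters fall back to the supplied default when
+      the property is unset or unparseable.
+
+      import org.apache.hadoop.conf.Configuration;
+
+      Configuration conf = new Configuration();
+      conf.setInt("my.app.retries", 5);                      // stored as "5"
+      int retries = conf.getInt("my.app.retries", 3);        // 5
+      long limit = conf.getLong("my.app.limit", 1024L);      // 1024: unset, default used
+      conf.setBoolean("my.app.verbose", true);
+      boolean verbose = conf.getBoolean("my.app.verbose", false); // true
+ -->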
+ <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Parse the given attribute as a set of integer ranges
+ @param name the attribute name
+ @param defaultValue the default value if it is not set
+ @return a new set of ranges from the configured value]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then <code>null</code> is returned.
+
+ @param name property name.
+ @return property value as an array of <code>String</code>s,
+ or <code>null</code>.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then the default value is returned.
+
+ @param name property name.
+ @param defaultValue the default value.
+ @return property value as an array of <code>String</code>s,
+ or the default value.]]>
+ </doc>
+ </method>
+ <method name="setStrings"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="values" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Set the array of string values for the <code>name</code> property as
+ comma delimited values.
+
+ @param name property name.
+ @param values the values.]]>
+ </doc>
+ </method>
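+ <!-- Usage sketch (editorial): storing a list as a single comma delimited
+      property. The property name and values are made up for illustration.
+
+      import org.apache.hadoop.conf.Configuration;
+
+      Configuration conf = new Configuration();
+      conf.setStrings("my.app.hosts", new String[] {"alpha", "beta"}); // "alpha,beta"
+      String[] hosts = conf.getStrings("my.app.hosts",
+                                       new String[] {"localhost"});   // {"alpha", "beta"}
+ -->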
+ <method name="getClassByName" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Load a class by name.
+
+ @param name the class name.
+ @return the class object.
+ @throws ClassNotFoundException if the class is not found.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;? extends U&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends U&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;U&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>
+ implementing the interface specified by <code>xface</code>.
+
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ An exception is thrown if the returned class does not implement the named
+ interface.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @param xface the interface implemented by the named class.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
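+ <!-- Usage sketch (editorial): constraining a class-valued property to an
+      interface. MyTask, DefaultTask and the property name are hypothetical;
+      per the description above, setClass rejects a class that does not
+      implement the given interface.
+
+      import org.apache.hadoop.conf.Configuration;
+
+      Configuration conf = new Configuration();
+      conf.setClass("my.app.task", MyTask.class, Runnable.class);
+      Class<? extends Runnable> taskClass =
+          conf.getClass("my.app.task", DefaultTask.class, Runnable.class);
+ -->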
+ <method name="setClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to the name of a
+ <code>theClass</code> implementing the given interface <code>xface</code>.
+
+ An exception is thrown if <code>theClass</code> does not implement the
+ interface <code>xface</code>.
+
+ @param name property name.
+ @param theClass property value.
+ @param xface the interface implemented by the named class.]]>
+ </doc>
+ </method>
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
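+ <!-- Usage sketch (editorial): spreading files across several local
+      directories. The property name and paths are hypothetical, and the
+      comma delimited directory list is an assumption consistent with the
+      rest of this API; with more than one directory configured, the choice
+      is driven by the path's hash code, so the same path always maps to
+      the same directory.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.Path;
+
+      Configuration conf = new Configuration();
+      conf.set("my.local.dirs", "/tmp/work1,/tmp/work2");
+      Path spill = conf.getLocalPath("my.local.dirs", "spill0.out"); // throws IOException
+ -->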
+ <method name="getFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getResource" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the {@link URL} for the named resource.
+
+ @param name resource name.
+ @return the url for the named resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get an input stream attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return an input stream attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsReader" return="java.io.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a {@link Reader} attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return a reader attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code>
+ key-value pairs in the configuration.
+
+ @return an iterator over the entries.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write out the non-default properties in this configuration to the given
+ {@link OutputStream}.
+
+ @param out the output stream to write to.]]>
+ </doc>
+ </method>
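+ <!-- Usage sketch (editorial): dumping a configuration. iterator() walks the
+      key/value pairs; write(...) emits only the non-default properties.
+
+      import org.apache.hadoop.conf.Configuration;
+      import java.util.Iterator;
+      import java.util.Map;
+
+      Configuration conf = new Configuration();
+      conf.set("my.app.name", "demo");              // hypothetical property
+      Iterator<Map.Entry<String, String>> it = conf.iterator();
+      while (it.hasNext()) {
+        Map.Entry<String, String> e = it.next();
+        System.out.println(e.getKey() + " = " + e.getValue());
+      }
+      conf.write(System.out);                       // throws IOException
+ -->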
+ <method name="getClassLoader" return="java.lang.ClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link ClassLoader} for this job.
+
+ @return the correct class loader.]]>
+ </doc>
+ </method>
+ <method name="setClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="classLoader" type="java.lang.ClassLoader"/>
+ <doc>
+ <![CDATA[Set the class loader that will be used to load the various objects.
+
+ @param classLoader the new class loader.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setQuietMode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="quietmode" type="boolean"/>
+ <doc>
+ <![CDATA[Set the quietness-mode.
+
+ In quiet-mode, error and informational messages might not be logged.
+
+ @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
+ to turn it off.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[For debugging. List non-default properties to the terminal and exit.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides access to configuration parameters.
+
+ <h4 id="Resources">Resources</h4>
+
+ <p>Configurations are specified by resources. A resource contains a set of
+ name/value pairs as XML data. Each resource is named by either a
+ <code>String</code> or by a {@link Path}. If named by a <code>String</code>,
+ then the classpath is examined for a file with that name. If named by a
+ <code>Path</code>, then the local filesystem is examined directly, without
+ referring to the classpath.
+
+ <p>Hadoop by default specifies two resources, loaded in-order from the
+ classpath: <ol>
+ <li><tt><a href="{@docRoot}/../hadoop-default.html">hadoop-default.xml</a>
+ </tt>: Read-only defaults for hadoop.</li>
+ <li><tt>hadoop-site.xml</tt>: Site-specific configuration for a given hadoop
+ installation.</li>
+ </ol>
+ Applications may add additional resources, which are loaded
+ subsequent to these resources in the order they are added.
+
+ <h4 id="FinalParams">Final Parameters</h4>
+
+ <p>Configuration parameters may be declared <i>final</i>.
+ Once a resource declares a value final, no subsequently-loaded
+ resource can alter that value.
+ For example, one might define a final parameter with:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;dfs.client.buffer.dir&lt;/name&gt;
+ &lt;value&gt;/tmp/hadoop/dfs/client&lt;/value&gt;
+ <b>&lt;final&gt;true&lt;/final&gt;</b>
+ &lt;/property&gt;</pre></tt>
+
+ Administrators typically define parameters as final in
+ <tt>hadoop-site.xml</tt> for values that user applications may not alter.
+
+ <h4 id="VariableExpansion">Variable Expansion</h4>
+
+ <p>Value strings are first processed for <i>variable expansion</i>. The
+ available properties are:<ol>
+ <li>Other properties defined in this Configuration; and, if a name is
+ undefined here,</li>
+ <li>Properties in {@link System#getProperties()}.</li>
+ </ol>
+
+ <p>For example, if a configuration resource contains the following property
+ definitions:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;basedir&lt;/name&gt;
+ &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
+ &lt;/property&gt;
+
+ &lt;property&gt;
+ &lt;name&gt;tempdir&lt;/name&gt;
+ &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt;
+ &lt;/property&gt;</pre></tt>
+
+ When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
+ will be resolved to another property in this Configuration, while
+ <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
+ of the System property with that name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration -->
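+ <!-- A short sketch (editorial) tying the class description together: later
+      resources override earlier ones unless marked final, and values are
+      expanded on get(). The file location and property names below are
+      hypothetical; ${user.name} resolves via the System properties as
+      described above.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.Path;
+
+      Configuration conf = new Configuration();          // loads the two default resources
+      conf.addResource(new Path("/etc/myapp/myapp-site.xml")); // overrides non-final values
+      conf.set("basedir", "/user/${user.name}");
+      conf.set("tempdir", "${basedir}/tmp");
+      String t = conf.get("tempdir");  // e.g. "/user/alice/tmp" after variable expansion
+ -->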
+ <!-- start class org.apache.hadoop.conf.Configuration.IntegerRanges -->
+ <class name="Configuration.IntegerRanges" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Configuration.IntegerRanges"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Configuration.IntegerRanges" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isIncluded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Is the given value in the set of ranges?
+ @param value the value to check
+ @return is the value in the ranges?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A class that represents a set of positive integer ranges. It parses
+ strings of the form: "2-3,5,7-" where ranges are separated by comma and
+ the lower/upper bounds are separated by dash. Either the lower or upper
+ bound may be omitted meaning all values up to or over. So the string
+ above means 2, 3, 5, and 7, 8, 9, ...]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration.IntegerRanges -->
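+ <!-- Usage sketch (editorial): parsing the range syntax described above.
+      The configuration key is hypothetical.
+
+      import org.apache.hadoop.conf.Configuration;
+
+      Configuration.IntegerRanges r = new Configuration.IntegerRanges("2-3,5,7-");
+      boolean in3 = r.isIncluded(3);  // true
+      boolean in4 = r.isIncluded(4);  // false
+      boolean in9 = r.isIncluded(9);  // true: "7-" is open-ended upward
+
+      Configuration conf = new Configuration();
+      Configuration.IntegerRanges ports = conf.getRange("my.app.ports", "1-100");
+ -->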
+ <!-- start class org.apache.hadoop.conf.Configured -->
+ <class name="Configured" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Configured"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configured" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configured -->
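+ <!-- Usage sketch (editorial): a minimal Configured subclass. The class and
+      property names are hypothetical; getConf() and setConf(...) come from
+      the Configurable contract implemented here.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.conf.Configured;
+
+      public class MyService extends Configured {
+        public MyService(Configuration conf) { super(conf); }
+        public int timeoutSecs() {
+          return getConf().getInt("my.service.timeout", 30);
+        }
+      }
+ -->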
+ <doc>
+ <![CDATA[Configuration of system parameters.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.dfs">
+ <!-- start class org.apache.hadoop.dfs.AlreadyBeingCreatedException -->
+ <class name="AlreadyBeingCreatedException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AlreadyBeingCreatedException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The exception that happens when you ask to create a file that already
+ is being created, but is not closed yet.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.AlreadyBeingCreatedException -->
+ <!-- start class org.apache.hadoop.dfs.Balancer -->
+ <class name="Balancer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Run a balancer.
+ @param args command-line arguments]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Main method of the Balancer.
+ @param args arguments to the Balancer
+ @exception Exception if any exception occurs during datanode balancing]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this balancer's configuration.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set this balancer's configuration.]]>
+ </doc>
+ </method>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ALREADY_RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NO_MOVE_BLOCK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NO_MOVE_PROGRESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IO_EXCEPTION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ILLEGAL_ARGS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>The balancer is a tool that balances disk space usage on an HDFS cluster
+ when some datanodes become full or when new empty nodes join the cluster.
+ The tool is deployed as an application program that can be run by the
+ cluster administrator on a live HDFS cluster while applications are
+ adding and deleting files.
+
+ <p>SYNOPSIS
+ <pre>
+ To start:
+ bin/start-balancer.sh [-threshold <threshold>]
+ Example: bin/start-balancer.sh
+ start the balancer with a default threshold of 10%
+ bin/start-balancer.sh -threshold 5
+ start the balancer with a threshold of 5%
+ To stop:
+ bin/stop-balancer.sh
+ </pre>
+
+ <p>DESCRIPTION
+ <p>The threshold parameter is a fraction in the range of (0%, 100%) with a
+ default value of 10%. The threshold sets a target for whether the cluster
+ is balanced. A cluster is balanced if for each datanode, the utilization
+ of the node (ratio of used space at the node to total capacity of the node)
+ differs from the utilization of the cluster (ratio of used space in the cluster
+ to total capacity of the cluster) by no more than the threshold value.
+ The smaller the threshold, the more balanced a cluster will become.
+ It takes more time to run the balancer for small threshold values.
+ Also for a very small threshold the cluster may not be able to reach the
+ balanced state when applications write and delete files concurrently.
+
+ <p>The tool moves blocks from highly utilized datanodes to poorly
+ utilized datanodes iteratively. In each iteration a datanode moves or
+ receives no more than the lesser of 10G bytes or the threshold fraction
+ of its capacity. Each iteration runs no more than 20 minutes.
+ At the end of each iteration, the balancer obtains updated datanodes
+ information from the namenode.
+
+ <p>A configuration property that limits the balancer's use of bandwidth is
+ defined in the default configuration file:
+ <pre>
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>1048576</value>
+ <description> Specifies the maximum bandwidth that each datanode
+ can utilize for the balancing purpose in terms of the number of bytes
+ per second. </description>
+ </property>
+ </pre>
+
+ <p>This property determines the maximum speed at which a block will be
+ moved from one datanode to another. The default value is 1MB/s. The higher
+ the bandwidth, the faster a cluster can reach the balanced state,
+ but with greater competition with application processes. If an
+ administrator changes the value of this property in the configuration
+ file, the change is observed when HDFS is next restarted.
+
+ <p>MONITORING BALANCER PROGRESS
+ <p>After the balancer is started, the name of an output file where its
+ progress will be recorded is printed to the screen. The administrator
+ can monitor the running of the balancer by reading this output file.
+ The output shows the balancer's status iteration by iteration. In each
+ iteration it prints the starting time, the iteration number, the total
+ number of bytes that have been moved in the previous iterations,
+ the total number of bytes that are left to move in order for the cluster
+ to be balanced, and the number of bytes that are being moved in this
+ iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left
+ To Move" is decreasing.
+
+ <p>Running multiple instances of the balancer in an HDFS cluster is
+ prohibited by the tool.
+
+ <p>The balancer automatically exits when any of the following five
+ conditions is satisfied:
+ <ol>
+ <li>The cluster is balanced;
+ <li>No block can be moved;
+ <li>No block has been moved for five consecutive iterations;
+ <li>An IOException occurs while communicating with the namenode;
+ <li>Another balancer is running.
+ </ol>
+
+ <p>Upon exit, a balancer returns an exit code and prints one of the
+ following messages to the output file, corresponding to the above exit
+ reasons:
+ <ol>
+ <li>The cluster is balanced. Exiting
+ <li>No block can be moved. Exiting...
+ <li>No block has been moved for five consecutive iterations. Exiting...
+ <li>Received an IO exception: failure reason. Exiting...
+ <li>Another balancer is running. Exiting...
+ </ol>
+
+ <p>The administrator can interrupt the execution of the balancer at any
+ time by running the command "stop-balancer.sh" on the machine where the
+ balancer is running.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.Balancer -->
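+ <!-- Usage sketch (editorial): the programmatic equivalent of the shell
+      commands in the SYNOPSIS above. Note that main(...) is an application
+      entry point and may terminate the JVM with one of the exit codes
+      listed as public fields of this class.
+
+      org.apache.hadoop.dfs.Balancer.main(new String[] {"-threshold", "5"});
+ -->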
+ <!-- start class org.apache.hadoop.dfs.ChecksumDistributedFileSystem -->
+ <class name="ChecksumDistributedFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumDistributedFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ChecksumDistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </constructor>
+ <method name="getContentLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRawCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw capacity of the filesystem, disregarding
+ replication.]]>
+ </doc>
+ </method>
+ <method name="getRawUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw used space in the filesystem, disregarding
+ replication.]]>
+ </doc>
+ </method>
+ <method name="getDataNodeStats" return="org.apache.hadoop.dfs.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return statistics for each datanode.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+
+ @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalize previously upgraded file system state.]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.dfs.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[We need to find the blocks that didn't match. Likely only one
+ is corrupt but we will report both to the namenode. In the future,
+ we can consider figuring out exactly which block is corrupt.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the stat information about the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of ChecksumFileSystem over DistributedFileSystem.
+ Note that as of now (May 07), DistributedFileSystem natively checksums
+ all of its data. Using this class should not be necessary in most cases.
+ Currently provided mainly for backward compatibility and testing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.ChecksumDistributedFileSystem -->
+ <!-- start class org.apache.hadoop.dfs.DataBlockScanner -->
+ <class name="DataBlockScanner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DataBlockScanner -->
+ <!-- start class org.apache.hadoop.dfs.DataBlockScanner.Servlet -->
+ <class name="DataBlockScanner.Servlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataBlockScanner.Servlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DataBlockScanner.Servlet -->
+ <!-- start class org.apache.hadoop.dfs.DataChecksum -->
+ <class name="DataChecksum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.zip.Checksum"/>
+ <method name="newDataChecksum" return="org.apache.hadoop.dfs.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="int"/>
+ <param name="bytesPerChecksum" type="int"/>
+ </method>
+ <method name="newDataChecksum" return="org.apache.hadoop.dfs.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <doc>
+ <![CDATA[Creates a DataChecksum from HEADER_LEN bytes starting at bytes[offset].
+ @return DataChecksum of the type in the array or null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="newDataChecksum" return="org.apache.hadoop.dfs.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This constructs a DataChecksum by reading HEADER_LEN bytes from
+ the input stream <i>in</i>.]]>
+ </doc>
+ </method>
+ <method name="writeHeader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the checksum header to the output stream <i>out</i>.]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="writeValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="reset" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the current checksum to the stream.
+ If <i>reset</i> is true, then resets the checksum.
+ @return number of bytes written; will be equal to getChecksumSize().]]>
+ </doc>
+ </method>
+ <method name="writeValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="reset" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the current checksum to a buffer.
+ If <i>reset</i> is true, then resets the checksum.
+ @return number of bytes written; will be equal to getChecksumSize().]]>
+ </doc>
+ </method>
+ <method name="compare" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <doc>
+ <![CDATA[Compares the checksum located at buf[offset] with the current checksum.
+ @return true if the checksum matches and false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getChecksumType" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getChecksumSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesPerChecksum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumBytesInSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getChecksumHeaderSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getValue" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ </method>
+ <field name="HEADER_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_NULL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_CRC32" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class provides an interface and utilities for processing checksums for
+ DFS data transfers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DataChecksum -->
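+ <!-- Usage sketch (editorial): a CRC32 checksum round trip. The input data
+      and the 512 bytes-per-checksum figure are illustrative; per the method
+      descriptions above, writeValue(..., true) resets the checksum after
+      writing it.
+
+      import org.apache.hadoop.dfs.DataChecksum;
+
+      byte[] data = "hello".getBytes();
+      DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);
+      sum.update(data, 0, data.length);
+      byte[] stored = new byte[sum.getChecksumSize()];
+      sum.writeValue(stored, 0, true);      // throws IOException; resets the checksum
+      sum.update(data, 0, data.length);
+      boolean ok = sum.compare(stored, 0);  // true: same bytes yield the same checksum
+ -->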
+ <!-- start class org.apache.hadoop.dfs.DataNode -->
+ <class name="DataNode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.FSConstants"/>
+ <implements name="java.lang.Runnable"/>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use {@link NetUtils#createSocketAddr(String)} instead.]]>
+ </doc>
+ </method>
+ <method name="getDataNode" return="org.apache.hadoop.dfs.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the DataNode object]]>
+ </doc>
+ </method>
+ <method name="getNameNodeAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSelfAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNamenode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the namenode's identifier]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down this instance of the datanode.
+ Returns only after shutdown is complete.]]>
+ </doc>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Main loop for the DataNode. Runs until shutdown,
+ forever calling remote NameNode functions.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[No matter what kind of exception we get, keep retrying offerService().
+ That's the loop that connects to the NameNode and provides basic DataNode
+ functionality.
+
+ Only stop when "shouldRun" is turned off (which can only happen at shutdown).]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="scheduleBlockReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delay" type="long"/>
+ <doc>
+ <![CDATA[This method arranges for the data node to send the block report at the next heartbeat.]]>
+ </doc>
+ </method>
+ <method name="getFSDataset" return="org.apache.hadoop.dfs.FSDatasetInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method is used for testing.
+ Examples are adding and deleting blocks directly.
+ The most common usage will be when the data node's storage is simulated.
+
+ @return the fsdataset that stores the blocks]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DataNode is a class (and program) that stores a set of
+ blocks for a DFS deployment. A single deployment can
+ have one or many DataNodes. Each DataNode communicates
+ regularly with a single NameNode. It also communicates
+ with client code and other DataNodes from time to time.
+
+ DataNodes store a series of named blocks. The DataNode
+ allows client code to read these blocks, or to write new
+ block data. The DataNode may also, in response to instructions
+ from its NameNode, delete blocks or copy blocks to/from other
+ DataNodes.
+
+ The DataNode maintains just one critical table:
+ block-> stream of bytes (of BLOCK_SIZE or less)
+
+ This info is stored on a local disk. The DataNode
+ reports the table's contents to the NameNode upon startup
+ and every so often afterwards.
+
+ DataNodes spend their lives in an endless loop of asking
+ the NameNode for something to do. A NameNode cannot connect
+ to a DataNode directly; a NameNode simply returns values from
+ functions invoked by a DataNode.
+
+ DataNodes maintain an open server socket so that client code
+ or other DataNodes can read/write data. The host/port for
+ this server is reported to the NameNode, which then sends that
+ information to clients or other DataNodes that might be interested.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DataNode -->
+ <!-- start class org.apache.hadoop.dfs.DatanodeDescriptor -->
+ <class name="DatanodeDescriptor" extends="org.apache.hadoop.dfs.DatanodeInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DatanodeDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+ @param nodeID id of the data node]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network
+ @param hostName the hostname; it may differ from the host specified in the DatanodeID]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, long, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param capacity capacity of the data node
+ @param dfsUsed space used by the data node
+ @param remaining remaining capacity of the data node
+ @param xceiverCount # of data transfers at the data node]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, java.lang.String, java.lang.String, long, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network
+ @param capacity capacity of the data node, including space used by non-DFS data
+ @param dfsUsed the space used by the dfs datanode
+ @param remaining remaining capacity of the data node
+ @param xceiverCount # of data transfers at the data node]]>
+ </doc>
+ </constructor>
+ <field name="isAlive" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeDescriptor tracks stats on a given DataNode,
+ such as available storage capacity, last update time, etc.,
+ and maintains a set of blocks stored on the datanode.
+
+ This data structure is internal
+ to the namenode. It is *not* sent over-the-wire to the Client
+ or the Datanodes. Neither is it stored persistently in the
+ fsImage.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DatanodeDescriptor -->
+ <!-- start class org.apache.hadoop.dfs.DatanodeID -->
+ <class name="DatanodeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeID default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="org.apache.hadoop.dfs.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeID copy constructor
+
+ @param from the DatanodeID to copy]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="java.lang.String, java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create DatanodeID
+
+ @param nodeName (hostname:portNumber)
+ @param storageID data storage ID
+ @param infoPort port at which the HTTP server is bound]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return hostname:portNumber.]]>
+ </doc>
+ </method>
+ <method name="getStorageID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return data storage ID.]]>
+ </doc>
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return infoPort (the port to which the HTTP server is bound)]]>
+ </doc>
+ </method>
+ <method name="getHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the hostname, without the :portNumber.]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="to" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Comparable.
+ The basis of comparison is the String name (host:portNumber) only.
+ @param o the object to compare to
+ @return as specified by Comparable.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="storageID" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="infoPort" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeID is composed of the data node
+ name (hostname:portNumber) and the data storage ID,
+ which it currently represents.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DatanodeID -->
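+ <!-- Usage sketch (editorial): the hostnames, storage IDs and info port
+      below are made up. Ordering and equality are based on the
+      "host:port" name only, per compareTo above.
+
+      import org.apache.hadoop.dfs.DatanodeID;
+
+      DatanodeID a = new DatanodeID("host1:50010", "DS-12345", 50075);
+      DatanodeID b = new DatanodeID("host2:50010", "DS-67890", 50075);
+      String host = a.getHost();       // "host1"
+      int port = a.getPort();          // 50010
+      boolean before = a.compareTo(b) < 0; // true: "host1:50010" sorts first
+ -->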
+ <!-- start class org.apache.hadoop.dfs.DatanodeInfo -->
+ <class name="DatanodeInfo" extends="org.apache.hadoop.dfs.DatanodeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw capacity.]]>
+ </doc>
+ </method>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The used space by the data node.]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw free space.]]>
+ </doc>
+ </method>
+ <method name="getLastUpdate" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The time when this information was accurate.]]>
+ </doc>
+ </method>
+ <method name="getXceiverCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[number of active connections]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[rack name]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the rack name]]>
+ </doc>
+ </method>
+ <method name="getHostName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setHostName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ </method>
+ <method name="getDatanodeReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A formatted string for reporting the status of the DataNode.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="capacity" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dfsUsed" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="remaining" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="lastUpdate" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="xceiverCount" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="adminState" type="org.apache.hadoop.dfs.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeInfo represents the status of a DataNode.
+ This object is used for communication in the
+ Datanode Protocol and the Client Protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DatanodeInfo -->
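+ <!-- Illustrative usage: a minimal sketch, assuming a running cluster whose
+ default filesystem is a DistributedFileSystem, of reading the DatanodeInfo
+ accessors documented above via DistributedFileSystem#getDataNodeStats().
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.dfs.DatanodeInfo;
+ import org.apache.hadoop.dfs.DistributedFileSystem;
+ import org.apache.hadoop.fs.FileSystem;
+
+ public class DatanodeStatusExample {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     // The cast assumes fs.default.name points at an hdfs:// URI.
+     DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
+     for (DatanodeInfo dn : dfs.getDataNodeStats()) {
+       System.out.println(dn.getDatanodeReport());            // formatted status
+       System.out.println("remaining: " + dn.getRemaining()); // raw free space
+     }
+     dfs.close();
+   }
+ }
+ -->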
+ <!-- start class org.apache.hadoop.dfs.DatanodeInfo.AdminStates -->
+ <class name="DatanodeInfo.AdminStates" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.DatanodeInfo.AdminStates&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.DatanodeInfo.AdminStates[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.DatanodeInfo.AdminStates"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DatanodeInfo.AdminStates -->
+ <!-- start class org.apache.hadoop.dfs.DFSAdmin -->
+ <class name="DFSAdmin" extends="org.apache.hadoop.fs.FsShell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSAdmin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a DFSAdmin object.]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSAdmin" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a DFSAdmin object.]]>
+ </doc>
+ </constructor>
+ <method name="report"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gives a report on how the FileSystem is doing.
+ @exception IOException if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Safe mode maintenance command.
+ Usage: java DFSAdmin -safemode [enter | leave | get]
+ @param argv List of command line parameters.
+ @param idx The index of the command that is being processed.
+ @exception IOException if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <method name="refreshNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to reread the hosts and excluded hosts
+ file.
+ Usage: java DFSAdmin -refreshNodes
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to finalize a previously performed upgrade.
+ Usage: java DFSAdmin -finalizeUpgrade
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="upgradeProgress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to request current distributed upgrade status,
+ a detailed status, or to force the upgrade to proceed.
+
+ Usage: java DFSAdmin -upgradeProgress [status | details | force]
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="metaSave" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps DFS data structures into the specified file.
+ Usage: java DFSAdmin -metasave filename
+ @param argv List of command line parameters.
+ @param idx The index of the command that is being processed.
+ @exception IOException if an error occurred while accessing
+ the file or path.]]>
+ </doc>
+ </method>
+ <method name="printUsage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Displays format of commands.
+ @param cmd The command that is being executed.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@param argv The parameters passed to this program.
+ @exception Exception if the filesystem does not exist.
+ @return 0 on success, non zero on error.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods.
+ @param argv Command line parameters.
+ @exception Exception if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class provides some DFS administrative access.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DFSAdmin -->
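+ <!-- Illustrative usage: a minimal sketch, assuming the client configuration
+ points at a live namenode, of driving DFSAdmin programmatically through the
+ run(String[]) method documented above rather than through main().
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.dfs.DFSAdmin;
+
+ public class DfsAdminReportExample {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();        // reads hadoop-site.xml
+     DFSAdmin admin = new DFSAdmin(conf);             // Configuration constructor above
+     int exitCode = admin.run(new String[] { "-report" }); // like: java DFSAdmin -report
+     System.exit(exitCode);                           // 0 on success, non-zero on error
+   }
+ }
+ -->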
+ <!-- start class org.apache.hadoop.dfs.DFSck -->
+ <class name="DFSck" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="DFSck" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Filesystem checker.
+ @param conf current Configuration
+ @throws Exception]]>
+ </doc>
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
+ sub-optimal conditions.
+ <p>The tool scans all files and directories, starting from an indicated
+ root path. The following abnormal conditions are detected and handled:</p>
+ <ul>
+ <li>files with blocks that are completely missing from all datanodes.<br/>
+ In this case the tool can perform one of the following actions:
+ <ul>
+ <li>none ({@link NamenodeFsck#FIXING_NONE})</li>
+ <li>move corrupted files to /lost+found directory on DFS
+ ({@link NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as
+ block chains, representing the longest consecutive series of valid blocks.</li>
+ <li>delete corrupted files ({@link NamenodeFsck#FIXING_DELETE})</li>
+ </ul>
+ </li>
+ <li>detect files with under-replicated or over-replicated blocks</li>
+ </ul>
+ Additionally, the tool collects detailed overall DFS statistics, and
+ optionally can print detailed statistics on block locations and replication
+ factors of each file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DFSck -->
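+ <!-- Illustrative usage: a minimal sketch of running the DFSck checker from
+ code. Since DFSck implements org.apache.hadoop.util.Tool, it is assumed here
+ to be launched through ToolRunner, with "/" as the root path to scan.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.dfs.DFSck;
+ import org.apache.hadoop.util.ToolRunner;
+
+ public class FsckExample {
+   public static void main(String[] args) throws Exception {
+     int res = ToolRunner.run(new DFSck(new Configuration()),
+                              new String[] { "/" }); // scan from the root path
+     System.exit(res);
+   }
+ }
+ -->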
+ <!-- start class org.apache.hadoop.dfs.DistributedFileSystem -->
+ <class name="DistributedFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename files/dirs]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get rid of Path f, whether a true file or dir.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Requires the recursive flag to be set in order to
+ delete a non-empty directory.]]>
+ </doc>
+ </method>
+ <method name="getContentLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDiskStatus" return="org.apache.hadoop.dfs.DistributedFileSystem.DiskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the disk usage of the filesystem, including total capacity,
+ used space, and remaining space]]>
+ </doc>
+ </method>
+ <method name="getRawCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw capacity of the filesystem, disregarding
+ replication.]]>
+ </doc>
+ </method>
+ <method name="getRawUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw used space in the filesystem, disregarding
+ replication.]]>
+ </doc>
+ </method>
+ <method name="getDataNodeStats" return="org.apache.hadoop.dfs.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return statistics for each datanode.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+
+ @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(
+ FSConstants.SafeModeAction)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalize previously upgraded file system state.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.dfs.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[We need to find the blocks that didn't match. Likely only one
+ is corrupt but we will report both to the namenode. In the future,
+ we can consider figuring out exactly which block is corrupt.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the stat information about the file.
+ @throws FileNotFoundException if the file does not exist.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implementation of the abstract FileSystem for the DFS system.
+ This object is the way end-user code interacts with a Hadoop
+ DistributedFileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DistributedFileSystem -->
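+ <!-- Illustrative usage: a minimal sketch of the end-user API described
+ above, assuming fs.default.name resolves to HDFS; /tmp/example.txt is a
+ hypothetical path chosen for the example.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FSDataInputStream;
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+
+ public class DfsRoundTripExample {
+   public static void main(String[] args) throws Exception {
+     FileSystem fs = FileSystem.get(new Configuration());
+     Path p = new Path("/tmp/example.txt");
+     FSDataOutputStream out = fs.create(p);  // convenience create() inherited from FileSystem
+     out.writeUTF("hello, dfs");
+     out.close();
+     FSDataInputStream in = fs.open(p);
+     System.out.println(in.readUTF());
+     in.close();
+     fs.delete(p, false);                    // non-recursive delete of a plain file
+     fs.close();
+   }
+ }
+ -->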
+ <!-- start class org.apache.hadoop.dfs.DistributedFileSystem.DiskStatus -->
+ <class name="DistributedFileSystem.DiskStatus" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedFileSystem.DiskStatus" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DistributedFileSystem.DiskStatus -->
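+ <!-- Illustrative usage: a minimal sketch of reporting cluster disk usage
+ through getDiskStatus(), assuming the default filesystem is HDFS.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.dfs.DistributedFileSystem;
+ import org.apache.hadoop.fs.FileSystem;
+
+ public class DiskStatusExample {
+   public static void main(String[] args) throws Exception {
+     DistributedFileSystem dfs =
+         (DistributedFileSystem) FileSystem.get(new Configuration());
+     DistributedFileSystem.DiskStatus ds = dfs.getDiskStatus();
+     System.out.println("capacity:  " + ds.getCapacity());  // total raw bytes
+     System.out.println("dfs used:  " + ds.getDfsUsed());
+     System.out.println("remaining: " + ds.getRemaining());
+     dfs.close();
+   }
+ }
+ -->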
+ <!-- start class org.apache.hadoop.dfs.FileDataServlet -->
+ <class name="FileDataServlet" extends="org.apache.hadoop.dfs.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileDataServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Service a GET request as described below.
+ Request:
+ {@code
+ GET http://<nn>:<port>/data[/<path>] HTTP/1.1
+ }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Redirect queries about the hosted filesystem to an appropriate datanode.
+ @see org.apache.hadoop.dfs.HftpFileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FileDataServlet -->
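+ <!-- Illustrative usage: a minimal sketch of the GET request format shown in
+ doGet() above. The host, port, and file path are placeholders, not defaults
+ taken from this file.
+
+ import java.io.BufferedReader;
+ import java.io.InputStreamReader;
+ import java.net.HttpURLConnection;
+ import java.net.URL;
+
+ public class DataServletFetchExample {
+   public static void main(String[] args) throws Exception {
+     URL url = new URL("http://namenode.example.com:50070/data/user/alice/file.txt");
+     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+     // The servlet answers with a redirect to an appropriate datanode,
+     // which HttpURLConnection follows by default.
+     BufferedReader r =
+         new BufferedReader(new InputStreamReader(conn.getInputStream()));
+     for (String line; (line = r.readLine()) != null; ) {
+       System.out.println(line);
+     }
+     r.close();
+     conn.disconnect();
+   }
+ }
+ -->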
+ <!-- start class org.apache.hadoop.dfs.FsckServlet -->
+ <class name="FsckServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FsckServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in Namesystem's jetty to do fsck on namenode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FsckServlet -->
+ <!-- start interface org.apache.hadoop.dfs.FSConstants -->
+ <interface name="FSConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="MIN_BLOCKS_FOR_WRITE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_ERROR" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_HEARTBEAT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_BLOCKRECEIVED" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_BLOCKREPORT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_TRANSFERDATA" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_OPEN" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_STARTFILE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ADDBLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RENAMETO" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DELETE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_COMPLETEFILE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_LISTING" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_OBTAINLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RELEASELOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_EXISTS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ISDIR" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_MKDIRS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RENEW_LEASE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ABANDONBLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RAWSTATS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DATANODEREPORT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DATANODE_HINTS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_TRANSFERBLOCKS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_INVALIDATE_BLOCKS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_FAILURE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_OPEN_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_STARTFILE_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ADDBLOCK_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RENAMETO_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DELETE_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_COMPLETEFILE_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_TRYAGAIN" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_LISTING_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_OBTAINLOCK_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RELEASELOCK_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_EXISTS_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ISDIR_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_MKDIRS_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RENEW_LEASE_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ABANDONBLOCK_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RAWSTATS_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DATANODEREPORT_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DATANODE_HINTS_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_WRITE_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_READ_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_READ_METADATA" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_REPLACE_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_COPY_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR_CHECKSUM" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR_INVALID" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR_EXISTS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_CHECKSUM_OK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DATA_TRANSFER_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version for data transfers between clients and datanodes.
+ This should change when the serialization of DatanodeInfo changes,
+ not just when the protocol changes; that requirement is easy to overlook.]]>
+ </doc>
+ </field>
+ <field name="OPERATION_FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STILL_WAITING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCK_INVALIDATE_CHUNK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HEARTBEAT_INTERVAL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCKREPORT_INTERVAL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCKREPORT_INITIAL_DELAY" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_SOFTLIMIT_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_HARDLIMIT_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_TIMEOUT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_TIMEOUT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_TIMEOUT_EXTENSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_PATH_LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_PATH_DEPTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SMALL_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_DATA_SOCKET_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SIZE_OF_INTEGER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Some handy constants]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.FSConstants -->
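+ <!-- Illustrative usage: a minimal sketch of the guard that
+ DATA_TRANSFER_VERSION enables, per its doc comment above. The framing (a
+ leading short followed by an op byte) is an assumption for illustration;
+ the exact wire layout is not recorded in this file.
+
+ import java.io.DataInputStream;
+ import java.io.IOException;
+
+ public class VersionCheckSketch {
+   static void checkVersion(DataInputStream in, int expected) throws IOException {
+     short got = in.readShort();            // hypothetical leading version field
+     if (got != expected) {
+       throw new IOException("data transfer version mismatch: got " + got
+           + ", expected " + expected);
+     }
+   }
+ }
+ -->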
+ <!-- start class org.apache.hadoop.dfs.FSConstants.CheckpointStates -->
+ <class name="FSConstants.CheckpointStates" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.CheckpointStates&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.CheckpointStates[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.CheckpointStates"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.CheckpointStates -->
+ <!-- start class org.apache.hadoop.dfs.FSConstants.DatanodeReportType -->
+ <class name="FSConstants.DatanodeReportType" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.DatanodeReportType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.DatanodeReportType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.DatanodeReportType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.DatanodeReportType -->
+ <!-- start class org.apache.hadoop.dfs.FSConstants.NodeType -->
+ <class name="FSConstants.NodeType" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.NodeType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.NodeType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Type of the node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.NodeType -->
+ <!-- start class org.apache.hadoop.dfs.FSConstants.SafeModeAction -->
+ <class name="FSConstants.SafeModeAction" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.SafeModeAction&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.SafeModeAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.SafeModeAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.SafeModeAction -->
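+ <!-- Illustrative usage: a minimal sketch of querying safe mode through
+ DistributedFileSystem#setSafeMode(FSConstants.SafeModeAction). The constant
+ name SAFEMODE_GET is an assumption; the enum's values are not listed in
+ this generated file.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.dfs.DistributedFileSystem;
+ import org.apache.hadoop.dfs.FSConstants;
+ import org.apache.hadoop.fs.FileSystem;
+
+ public class SafeModeExample {
+   public static void main(String[] args) throws Exception {
+     DistributedFileSystem dfs =
+         (DistributedFileSystem) FileSystem.get(new Configuration());
+     boolean inSafeMode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
+     System.out.println("namenode in safe mode: " + inSafeMode);
+     dfs.close();
+   }
+ }
+ -->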
+ <!-- start class org.apache.hadoop.dfs.FSConstants.StartupOption -->
+ <class name="FSConstants.StartupOption" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.StartupOption&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.StartupOption[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.StartupOption"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.StartupOption -->
+ <!-- start class org.apache.hadoop.dfs.FSConstants.UpgradeAction -->
+ <class name="FSConstants.UpgradeAction" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.UpgradeAction&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.UpgradeAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.UpgradeAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Distributed upgrade actions:
+
+ 1. Get upgrade status.
+ 2. Get detailed upgrade status.
+ 3. Proceed with the upgrade if it is stuck, no matter what the status is.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.UpgradeAction -->
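+ <!-- Illustrative usage: a minimal sketch of action 1 above (get upgrade
+ status) via DistributedFileSystem#distributedUpgradeProgress. The constant
+ name GET_STATUS and the null return when no upgrade is running are
+ assumptions; neither is recorded in this generated file.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.dfs.DistributedFileSystem;
+ import org.apache.hadoop.dfs.FSConstants;
+ import org.apache.hadoop.dfs.UpgradeStatusReport;
+ import org.apache.hadoop.fs.FileSystem;
+
+ public class UpgradeStatusExample {
+   public static void main(String[] args) throws Exception {
+     DistributedFileSystem dfs =
+         (DistributedFileSystem) FileSystem.get(new Configuration());
+     UpgradeStatusReport report =
+         dfs.distributedUpgradeProgress(FSConstants.UpgradeAction.GET_STATUS);
+     System.out.println(report == null ? "no upgrade in progress"
+                                       : report.toString());
+     dfs.close();
+   }
+ }
+ -->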
+ <!-- start interface org.apache.hadoop.dfs.FSDatasetInterface -->
+ <interface name="FSDatasetInterface" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean"/>
+ <method name="getMetaDataLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the length of the metadata file of the specified block
+ @param b - the block for which the metadata length is desired
+ @return the length of the metadata file for the specified block.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMetaDataInputStream" return="org.apache.hadoop.dfs.FSDatasetInterface.MetaDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the metadata of block b as an input stream (and its length)
+ @param b - the block
+ @return the metadata input stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="metaFileExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Does the meta file exist for this block?
+ @param b - the block
+ @return true if the metafile for the specified block exists
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the specified block's on-disk length (excluding metadata)
+ @param b
+ @return the specified block's on-disk length (excluding metadata)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream to read the contents of the specified block
+ @param b
+ @return an input stream to read the contents of the specified block
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <param name="seekOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream at the specified offset of the specified block
+ @param b
+ @param seekOffset
+ @return an input stream to read the contents of the specified block,
+ starting at the offset
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeToBlock" return="org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <param name="isRecovery" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the block and returns output streams to write data and CRC
+ @param b
+ @param isRecovery True if this is part of error recovery, otherwise false
+ @return a BlockWriteStreams object to allow writing the block data
+ and CRC
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalizeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalizes the block previously opened for writing using writeToBlock.
+ The block size is taken from parameter b and must match the amount
+ of data written.
+ @param b
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unfinalizeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unfinalizes the block previously opened for writing using writeToBlock.
+ The temporary file associated with this block is deleted.
+ @param b
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockReport" return="org.apache.hadoop.dfs.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the block report - the full list of blocks stored
+ @return - the block report - the full list of blocks stored]]>
+ </doc>
+ </method>
+ <method name="isValidBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <doc>
+ <![CDATA[Is the block valid?
+ @param b
+ @return - true if the specified block is valid]]>
+ </doc>
+ </method>
+ <method name="invalidate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="invalidBlks" type="org.apache.hadoop.dfs.Block[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Invalidates the specified blocks
+ @param invalidBlks - the blocks to be invalidated
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkDataDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ <doc>
+ <![CDATA[Check if all the data directories are healthy
+ @throws DiskErrorException]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stringifies the name of the storage]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shutdown the FSDataset]]>
+ </doc>
+ </method>
+ <method name="getChannelPosition" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <param name="stream" type="org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current offset in the data stream.
+ @param b
+ @param stream The stream to the data file and checksum file
+ @return the position of the file pointer in the data stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setChannelPosition"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <param name="stream" type="org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams"/>
+ <param name="dataOffset" type="long"/>
+ <param name="ckOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets the file pointer of the data stream and checksum stream to
+ the specified values.
+ @param b
+ @param stream The stream for the data file and checksum file
+ @param dataOffset The position to which the file pointer for the data stream
+ should be set
+ @param ckOffset The position to which the file pointer for the checksum stream
+ should be set
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is an interface for the underlying storage that stores blocks for
+ a data node.
+ Examples are the FSDataset (which stores blocks on dirs) and
+ SimulatedFSDataset (which simulates data).]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.FSDatasetInterface -->
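+ <!-- Illustrative usage: a minimal sketch of the write lifecycle the
+ interface implies (writeToBlock, then finalizeBlock, with unfinalizeBlock
+ on failure). The actual data and CRC writes are elided because
+ BlockWriteStreams' stream fields are not listed in this file.
+
+ import java.io.IOException;
+ import org.apache.hadoop.dfs.Block;
+ import org.apache.hadoop.dfs.FSDatasetInterface;
+
+ public class BlockWriteLifecycleSketch {
+   static void writeBlock(FSDatasetInterface dataset, Block b) throws IOException {
+     FSDatasetInterface.BlockWriteStreams streams =
+         dataset.writeToBlock(b, false);   // false: not an error-recovery write
+     try {
+       // ... write the block's data and checksums to the streams here;
+       // the finalized length must match what was written ...
+       dataset.finalizeBlock(b);
+     } catch (IOException e) {
+       dataset.unfinalizeBlock(b);         // deletes the temporary file
+       throw e;
+     }
+   }
+ }
+ -->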
+ <!-- start class org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams -->
+ <class name="FSDatasetInterface.BlockWriteStreams" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This class contains the output streams for the data and checksum
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams -->
+ <!-- start class org.apache.hadoop.dfs.FSDatasetInterface.MetaDataInputStream -->
+ <class name="FSDatasetInterface.MetaDataInputStream" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides the input stream and length of the metadata
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSDatasetInterface.MetaDataInputStream -->
+ <!-- start class org.apache.hadoop.dfs.GetImageServlet -->
+ <class name="GetImageServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GetImageServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in Namesystem's jetty to retrieve a file.
+ Typically used by the Secondary NameNode to retrieve the image and
+ edits files for periodic checkpointing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.GetImageServlet -->
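+ <!-- Usage sketch (illustrative): fetching the current image over HTTP the
+      way a secondary namenode would. The servlet path (/getimage) and query
+      parameter (getimage=1) are assumptions about the servlet's contract,
+      and namenode:50070 is a placeholder address.
+
+      import java.io.FileOutputStream;
+      import java.io.InputStream;
+      import java.net.URL;
+
+      class FetchImage {
+        public static void main(String[] args) throws Exception {
+          URL url = new URL("http://namenode:50070/getimage?getimage=1");
+          InputStream in = url.openStream();
+          FileOutputStream out = new FileOutputStream("fsimage.copy");
+          byte[] buf = new byte[4096];
+          int n;
+          while ((n = in.read(buf)) > 0) {
+            out.write(buf, 0, n);   // stream the image to a local file
+          }
+          out.close();
+          in.close();
+        }
+      }
+ -->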
+ <!-- start class org.apache.hadoop.dfs.HftpFileSystem -->
+ <class name="HftpFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HftpFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="openConnection" return="java.net.HttpURLConnection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="query" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open an HTTP connection to the namenode to read file data and metadata.
+ @param path The path component of the URL
+ @param query The query component of the URL]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="nnAddr" type="java.net.InetSocketAddress"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ugi" type="org.apache.hadoop.security.UserGroupInformation"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="df" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of a protocol for accessing filesystems over HTTP.
+ The following implementation provides a limited, read-only interface
+ to a filesystem over HTTP.
+ @see org.apache.hadoop.dfs.ListPathsServlet
+ @see org.apache.hadoop.dfs.FileDataServlet]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.HftpFileSystem -->
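+ <!-- Usage sketch (illustrative): read-only access through HftpFileSystem.
+      The hftp:// scheme is served by the namenode's HTTP server; the host and
+      port here are placeholders (50070 is the conventional default for
+      dfs.http.address). HsftpFileSystem below offers the same interface over
+      HTTPS via hsftp://.
+
+      import java.net.URI;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileStatus;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      class HftpListing {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs =
+              FileSystem.get(URI.create("hftp://namenode:50070/"), conf);
+          for (FileStatus stat : fs.listStatus(new Path("/"))) {
+            System.out.println(stat.getPath() + "\t" + stat.getLen());
+          }
+          // create/rename/delete/mkdirs are declared above, but the class
+          // doc says the interface is read-only, so mutating calls are
+          // expected to fail.
+        }
+      }
+ -->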
+ <!-- start class org.apache.hadoop.dfs.HsftpFileSystem -->
+ <class name="HsftpFileSystem" extends="org.apache.hadoop.dfs.HftpFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HsftpFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="openConnection" return="java.net.HttpURLConnection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="query" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of a protocol for accessing filesystems over HTTPS.
+ The following implementation provides a limited, read-only interface
+ to a filesystem over HTTPS.
+ @see org.apache.hadoop.dfs.ListPathsServlet
+ @see org.apache.hadoop.dfs.FileDataServlet]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.HsftpFileSystem -->
+ <!-- start class org.apache.hadoop.dfs.JspHelper -->
+ <class name="JspHelper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JspHelper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="randomNode" return="org.apache.hadoop.dfs.DatanodeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="bestNode" return="org.apache.hadoop.dfs.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.dfs.LocatedBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="streamBlockInAscii"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="blockId" type="long"/>
+ <param name="blockSize" type="long"/>
+ <param name="offsetIntoBlock" type="long"/>
+ <param name="chunkSizeToView" type="long"/>
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="DFSNodesStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="live" type="java.util.ArrayList&lt;org.apache.hadoop.dfs.DatanodeDescriptor&gt;"/>
+ <param name="dead" type="java.util.ArrayList&lt;org.apache.hadoop.dfs.DatanodeDescriptor&gt;"/>
+ </method>
+ <method name="addTableHeader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableRow"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="columns" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableRow"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="columns" type="java.lang.String[]"/>
+ <param name="row" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableFooter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSafeModeText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInodeLimitText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeStatusText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="sortNodeList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodes" type="java.util.ArrayList&lt;org.apache.hadoop.dfs.DatanodeDescriptor&gt;"/>
+ <param name="field" type="java.lang.String"/>
+ <param name="order" type="java.lang.String"/>
+ </method>
+ <method name="printPathWithLinks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.lang.String"/>
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="namenodeInfoPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="printGotoForm"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="namenodeInfoPort" type="int"/>
+ <param name="file" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createTitle"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="file" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="percentageGraph" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="perc" type="int"/>
+ <param name="width" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="percentageGraph" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="perc" type="float"/>
+ <param name="width" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="WEB_UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="conf" type="org.apache.hadoop.conf.Configuration"
+ transient="false" volatile="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="webUGI" type="org.apache.hadoop.security.UnixUserGroupInformation"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.JspHelper -->
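+ <!-- Usage sketch (illustrative): JspHelper's static helpers are meant for
+      the DFS web pages. percentageGraph returns an HTML fragment; the width
+      unit (pixels) is an assumption, `out` stands for the page's JspWriter,
+      and the sketch assumes it compiles in org.apache.hadoop.dfs.
+
+      import java.io.IOException;
+      import javax.servlet.jsp.JspWriter;
+
+      class CapacityBar {
+        static void render(JspWriter out, int usedPercent) throws IOException {
+          // a bar showing usedPercent, 100 units wide
+          out.print(JspHelper.percentageGraph(usedPercent, 100));
+        }
+      }
+ -->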
+ <!-- start class org.apache.hadoop.dfs.LeaseExpiredException -->
+ <class name="LeaseExpiredException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LeaseExpiredException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The lease that was being used to create this file has expired.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.LeaseExpiredException -->
+ <!-- start class org.apache.hadoop.dfs.ListPathsServlet -->
+ <class name="ListPathsServlet" extends="org.apache.hadoop.dfs.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ListPathsServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="buildRoot" return="java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
+ <doc>
+ <![CDATA[Build a map from the query string, setting values and defaults.]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Service a GET request as described below.
+ Request:
+ {@code
+ GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*] HTTP/1.1
+ }
+
+ Where <i>option</i> (with its default value) is one of:
+ recursive (&quot;no&quot;)
+ filter (&quot;.*&quot;)
+ exclude (&quot;\..*\.crc&quot;)
+
+ Response: A flat list of files/directories in the following format:
+ {@code
+ <listing path="..." recursive="(yes|no)" filter="..."
+ time="yyyy-MM-dd hh:mm:ss UTC" version="...">
+ <directory path="..." modified="yyyy-MM-dd hh:mm:ss"/>
+ <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" blocksize="..."
+ replication="..." size="..."/>
+ </listing>
+ }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Obtain meta-information about a filesystem.
+ @see org.apache.hadoop.dfs.HftpFileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.ListPathsServlet -->
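+ <!-- Usage sketch (illustrative): invoking the listPaths servlet directly,
+      following the request format documented in doGet above. The host, port,
+      and /user path are placeholders.
+
+      import java.io.BufferedReader;
+      import java.io.InputStreamReader;
+      import java.net.URL;
+
+      class ListPathsClient {
+        public static void main(String[] args) throws Exception {
+          URL url = new URL("http://namenode:50070/listPaths/user?recursive=yes");
+          BufferedReader in =
+              new BufferedReader(new InputStreamReader(url.openStream()));
+          String line;
+          while ((line = in.readLine()) != null) {
+            System.out.println(line); // a <listing> of <directory> and <file> elements
+          }
+          in.close();
+        }
+      }
+ -->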
+ <!-- start class org.apache.hadoop.dfs.LocatedBlocks -->
+ <class name="LocatedBlocks" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getLocatedBlocks" return="java.util.List&lt;org.apache.hadoop.dfs.LocatedBlock&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get located blocks.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.dfs.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[Get located block.]]>
+ </doc>
+ </method>
+ <method name="locatedBlockCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get number of located blocks.]]>
+ </doc>
+ </method>
+ <method name="getFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Collection of blocks with their locations and the file length.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.LocatedBlocks -->
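+ <!-- Usage sketch (illustrative): walking a LocatedBlocks instance such as
+      the one NameNode.getBlockLocations returns. Only accessors listed above
+      are used; the class and method are hypothetical and assumed to compile
+      in org.apache.hadoop.dfs alongside these types.
+
+      class BlockMapDump {
+        static void describe(LocatedBlocks blocks) {
+          System.out.println("file length: " + blocks.getFileLength());
+          for (int i = 0; i < blocks.locatedBlockCount(); i++) {
+            System.out.println("block " + i + ": " + blocks.get(i));
+          }
+        }
+      }
+ -->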
+ <!-- start class org.apache.hadoop.dfs.NameNode -->
+ <class name="NameNode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.ClientProtocol"/>
+ <implements name="org.apache.hadoop.dfs.DatanodeProtocol"/>
+ <implements name="org.apache.hadoop.dfs.NamenodeProtocol"/>
+ <implements name="org.apache.hadoop.dfs.FSConstants"/>
+ <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start NameNode.
+ <p>
+ The name-node can be started with one of the following startup options:
+ <ul>
+ <li>{@link FSConstants.StartupOption#REGULAR REGULAR} - normal startup</li>
+ <li>{@link FSConstants.StartupOption#FORMAT FORMAT} - format name node</li>
+ <li>{@link FSConstants.StartupOption#UPGRADE UPGRADE} - start the cluster
+ upgrade and create a snapshot of the current file system state</li>
+ <li>{@link FSConstants.StartupOption#ROLLBACK ROLLBACK} - roll the
+ cluster back to the previous state</li>
+ </ul>
+ The option is passed via configuration field:
+ <tt>dfs.namenode.startup</tt>
+
+ The conf will be modified to reflect the actual ports on which
+ the NameNode is up and running if the user passes the port as
+ <code>zero</code> in the conf.
+
+ @param conf the configuration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="NameNode" type="java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a NameNode at the specified location and start it.
+
+ The conf will be modified to reflect the actual ports on which
+ the NameNode is up and running if the user passes the port as
+ <code>zero</code>.]]>
+ </doc>
+ </constructor>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="format"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Format a new filesystem. Destroys any filesystem that may already
+ exist at this location.]]>
+ </doc>
+ </method>
+ <method name="getNameNodeMetrics" return="org.apache.hadoop.dfs.NameNodeMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Wait for service to finish.
+ (Normally, it runs forever.)]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all NameNode threads and wait for all to finish.]]>
+ </doc>
+ </method>
+ <method name="getBlocks" return="org.apache.hadoop.dfs.BlocksWithLocations"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanode" type="org.apache.hadoop.dfs.DatanodeInfo"/>
+ <param name="size" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a list of blocks & their locations on <code>datanode</code> whose
+ total size is <code>size</code>.
+
+ @param datanode on which blocks are located
+ @param size total size of blocks]]>
+ </doc>
+ </method>
+ <method name="getBlockLocations" return="org.apache.hadoop.dfs.LocatedBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permissions" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="addBlock" return="org.apache.hadoop.dfs.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="abandonBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client needs to give up on the block.]]>
+ </doc>
+ </method>
+ <method name="abandonFileInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="complete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.dfs.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client has detected an error on the specified located blocks
+ and is reporting them to the server. For now, the namenode will
+ delete the blocks from the datanodes. In the future we might
+ check whether the blocks are actually corrupt.]]>
+ </doc>
+ </method>
+ <method name="getPreferredBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileInfo(String) instead">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileInfo(String) instead]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="renewLease"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getListing" return="org.apache.hadoop.dfs.DFSFileInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileInfo" return="org.apache.hadoop.dfs.DFSFileInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file.
+ @param src The string representation of the path to the file
+ @throws IOException if permission to access the file is denied by the system
+ @return an object containing information about the file,
+ or null if the file is not found]]>
+ </doc>
+ </method>
+ <method name="getStats" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDatanodeReport" return="org.apache.hadoop.dfs.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.dfs.FSConstants.DatanodeReportType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isInSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the cluster currently in safe mode?]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getEditLogSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the size of the current edit log.]]>
+ </doc>
+ </method>
+ <method name="rollEditLog" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Roll the edit log.]]>
+ </doc>
+ </method>
+ <method name="rollFsImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Roll the fsImage.]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.dfs.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps namenode state into the specified file.]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="fsync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="register" return="org.apache.hadoop.dfs.DatanodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="sendHeartbeat" return="org.apache.hadoop.dfs.DatanodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <param name="capacity" type="long"/>
+ <param name="dfsUsed" type="long"/>
+ <param name="remaining" type="long"/>
+ <param name="xmitsInProgress" type="int"/>
+ <param name="xceiverCount" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A data node notifies the name node that it is alive.
+ Returns a block-oriented command for the datanode to execute.
+ This will be either a transfer or a delete operation.]]>
+ </doc>
+ </method>
+ <method name="blockReport" return="org.apache.hadoop.dfs.DatanodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <param name="blocks" type="long[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockReceived"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <param name="blocks" type="org.apache.hadoop.dfs.Block[]"/>
+ <param name="delHints" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="versionRequest" return="org.apache.hadoop.dfs.NamespaceInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="processUpgradeCommand" return="org.apache.hadoop.dfs.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="comm" type="org.apache.hadoop.dfs.UpgradeCommand"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockCrcUpgradeGetBlockLocations" return="org.apache.hadoop.dfs.BlockCrcInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="verifyRequest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify request.
+
+ Verifies the correctness of the datanode version and registration ID,
+ and checks that the datanode does not need to be shut down.
+
+ @param nodeReg data node registration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="verifyVersion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="version" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify version.
+
+ @param version
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFsImageName" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the name of the fsImage file]]>
+ </doc>
+ </method>
+ <method name="getFsImageNameCheckpoint" return="java.io.File[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the name of the fsImage file uploaded by periodic
+ checkpointing]]>
+ </doc>
+ </method>
+ <method name="validateCheckpointUpload"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="token" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Validates that this is a valid checkpoint upload request]]>
+ </doc>
+ </method>
+ <method name="checkpointUploadDone"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Indicates that a new checkpoint has been successfully uploaded.]]>
+ </doc>
+ </method>
+ <method name="getFsEditName" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the name of the edits file]]>
+ </doc>
+ </method>
+ <method name="getNameNodeAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the address on which the NameNode is listening.
+ @return the address on which the NameNode is listening]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="stateChangeLog" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[NameNode serves as both directory namespace manager and
+ "inode table" for the Hadoop DFS. There is a single NameNode
+ running in any DFS deployment. (Well, except when there
+ is a second backup/failover NameNode.)
+
+ The NameNode controls two critical tables:
+ 1) filename->blocksequence (namespace)
+ 2) block->machinelist ("inodes")
+
+ The first table is stored on disk and is very precious.
+ The second table is rebuilt every time the NameNode comes
+ up.
+
+ 'NameNode' refers both to this class and to the 'NameNode server'.
+ The 'FSNamesystem' class actually performs most of the filesystem
+ management. The majority of the 'NameNode' class itself is concerned
+ with exposing the IPC interface to the outside world, plus some
+ configuration management.
+
+ NameNode implements the ClientProtocol interface, which allows
+ clients to ask for DFS services. ClientProtocol is not
+ designed for direct use by authors of DFS client code. End-users
+ should instead use the org.apache.hadoop.fs.FileSystem class.
+
+ NameNode also implements the DatanodeProtocol interface, used by
+ DataNode programs that actually store DFS data blocks. These
+ methods are invoked repeatedly and automatically by all the
+ DataNodes in a DFS deployment.
+
+ NameNode also implements the NamenodeProtocol interface, used by
+ secondary namenodes or rebalancing processes to get partial state of
+ the namenode, for example a partial blocksMap.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.NameNode -->
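+ <!-- Usage sketch (illustrative): embedding a NameNode, following the
+      constructor and format javadoc above. As that javadoc warns, format()
+      destroys any existing filesystem at the configured location, so this is
+      only suitable for a fresh setup. Assumed to compile in
+      org.apache.hadoop.dfs.
+
+      import org.apache.hadoop.conf.Configuration;
+
+      class EmbeddedNameNode {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          NameNode.format(conf);            // destructive: wipes any old image
+          NameNode nn = new NameNode(conf); // starts serving on the conf's ports
+          nn.join();                        // normally runs forever
+        }
+      }
+ -->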
+ <!-- start class org.apache.hadoop.dfs.NamenodeFsck -->
+ <class name="NamenodeFsck" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NamenodeFsck" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.dfs.NameNode, java.util.Map&lt;java.lang.String, java.lang.String[]&gt;, javax.servlet.http.HttpServletResponse"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filesystem checker.
+ @param conf configuration (namenode config)
+ @param nn namenode that this fsck is going to use
+ @param pmap key=value[] map that is passed to the HTTP servlet as URL parameters
+ @param response the object into which this servlet writes the URL contents
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="fsck"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check files on DFS, starting from the indicated path.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FIXING_NONE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Don't attempt any fixing.]]>
+ </doc>
+ </field>
+ <field name="FIXING_MOVE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Move corrupted files to /lost+found.]]>
+ </doc>
+ </field>
+ <field name="FIXING_DELETE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete corrupted files.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
+ sub-optimal conditions.
+ <p>The tool scans all files and directories, starting from an indicated
+ root path. The following abnormal conditions are detected and handled:</p>
+ <ul>
+ <li>files with blocks that are completely missing from all datanodes.<br/>
+ In this case the tool can perform one of the following actions:
+ <ul>
+ <li>none ({@link #FIXING_NONE})</li>
+ <li>move corrupted files to /lost+found directory on DFS
+ ({@link #FIXING_MOVE}). Remaining data blocks are saved as
+ block chains, representing the longest consecutive series of valid blocks.</li>
+ <li>delete corrupted files ({@link #FIXING_DELETE})</li>
+ </ul>
+ </li>
+ <li>detect files with under-replicated or over-replicated blocks</li>
+ </ul>
+ Additionally, the tool collects detailed overall DFS statistics, and
+ optionally can print detailed statistics on block locations and replication
+ factors of each file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.NamenodeFsck -->
+ <!-- start class org.apache.hadoop.dfs.NamenodeFsck.FsckResult -->
+ <class name="NamenodeFsck.FsckResult" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NamenodeFsck.FsckResult"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isHealthy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DFS is considered healthy if there are no missing blocks.]]>
+ </doc>
+ </method>
+ <method name="addMissing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Add a missing block name, plus its size.]]>
+ </doc>
+ </method>
+ <method name="getMissingIds" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a list of missing block names (as a list of Strings).]]>
+ </doc>
+ </method>
+ <method name="getMissingSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total size of missing data, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setMissingSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="missingSize" type="long"/>
+ </method>
+ <method name="getExcessiveReplicas" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of over-replicated blocks.]]>
+ </doc>
+ </method>
+ <method name="setExcessiveReplicas"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="overReplicatedBlocks" type="long"/>
+ </method>
+ <method name="getReplicationFactor" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the actual replication factor.]]>
+ </doc>
+ </method>
+ <method name="getMissingReplicas" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of under-replicated blocks. Note: missing blocks are not counted here.]]>
+ </doc>
+ </method>
+ <method name="setMissingReplicas"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="underReplicatedBlocks" type="long"/>
+ </method>
+ <method name="getTotalDirs" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total number of directories encountered during this scan.]]>
+ </doc>
+ </method>
+ <method name="setTotalDirs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalDirs" type="long"/>
+ </method>
+ <method name="getTotalFiles" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total number of files encountered during this scan.]]>
+ </doc>
+ </method>
+ <method name="setTotalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalFiles" type="long"/>
+ </method>
+ <method name="getTotalSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total size of scanned data, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setTotalSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalSize" type="long"/>
+ </method>
+ <method name="getReplication" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the intended replication factor, against which the over/under-
+ replicated blocks are counted. Note: this value comes from the current
+ Configuration supplied for the tool, so it may differ from the
+ value in the DFS configuration.]]>
+ </doc>
+ </method>
+ <method name="setReplication"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="replication" type="int"/>
+ </method>
+ <method name="getTotalBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of blocks in the scanned area.]]>
+ </doc>
+ </method>
+ <method name="setTotalBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalBlocks" type="long"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCorruptFiles" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of corrupted files.]]>
+ </doc>
+ </method>
+ <method name="setCorruptFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="corruptFiles" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[Result of an fsck run, plus overall DFS statistics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.NamenodeFsck.FsckResult -->
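+ <!-- Usage sketch (illustrative): FsckResult is a plain accumulator that a
+      scan fills in, and isHealthy() reduces to "no missing blocks". All the
+      values below are invented for the example; assumed to compile in
+      org.apache.hadoop.dfs.
+
+      class FsckSummary {
+        public static void main(String[] args) {
+          NamenodeFsck.FsckResult res = new NamenodeFsck.FsckResult();
+          res.setTotalFiles(1000);
+          res.setTotalBlocks(4000);
+          res.addMissing("blk_123", 67108864L); // hypothetical block id and size
+          System.out.println(res.isHealthy() ? "HEALTHY" : "CORRUPT");
+          System.out.println(res);              // human-readable summary
+        }
+      }
+ -->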
+ <!-- start class org.apache.hadoop.dfs.NameNodeMetrics -->
+ <class name="NameNodeMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="numFilesCreated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numGetBlockLocations" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesRenamed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesListed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="transactions" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="syncs" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockReport" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="safeModeTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="fsImageLoadTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various NameNode statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #syncs}.inc()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.NameNodeMetrics -->
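+ <!-- Illustrative sketch (hand-written, not JDiff output): the NameNodeMetrics doc
+ above describes bumping the public metric fields directly, e.g. {@link #syncs}.inc().
+ A minimal hedged Java sketch, assuming code inside the name node holds a
+ NameNodeMetrics instance under the hypothetical name "metrics":
+
+ // Record one journal sync and its duration; MetricsTimeVaryingRate
+ // aggregates the number of ops and their times over each interval.
+ long start = System.currentTimeMillis();
+ doSync(); // hypothetical operation being timed
+ metrics.syncs.inc(System.currentTimeMillis() - start);
+
+ // Simple counters are MetricsTimeVaryingInt; inc() bumps them by one.
+ metrics.numFilesCreated.inc();
+ -->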
+ <!-- start class org.apache.hadoop.dfs.NotReplicatedYetException -->
+ <class name="NotReplicatedYetException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NotReplicatedYetException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The file has not finished being written to enough datanodes yet.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.NotReplicatedYetException -->
+ <!-- start class org.apache.hadoop.dfs.SafeModeException -->
+ <class name="SafeModeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SafeModeException" type="java.lang.String, org.apache.hadoop.dfs.FSNamesystem.SafeModeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when the name node is in safe mode.
+ Clients cannot modify the namespace until safe mode is off.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.SafeModeException -->
+ <!-- start class org.apache.hadoop.dfs.SecondaryNameNode -->
+ <class name="SecondaryNameNode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.FSConstants"/>
+ <implements name="java.lang.Runnable"/>
+ <constructor name="SecondaryNameNode" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a connection to the primary namenode.]]>
+ </doc>
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down this instance of the secondary namenode.
+ Returns only after shutdown is complete.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods.
+ @param argv Command line parameters.
+ @exception Exception if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The Secondary NameNode is a helper to the primary NameNode.
+ The Secondary is responsible for supporting periodic checkpoints
+ of the HDFS metadata. The current design allows only one Secondary
+ NameNode per HDFS cluster.
+
+ The Secondary NameNode is a daemon that periodically wakes
+ up (determined by the schedule specified in the configuration),
+ triggers a periodic checkpoint and then goes back to sleep.
+ The Secondary NameNode uses the ClientProtocol to talk to the
+ primary NameNode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.SecondaryNameNode -->
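+ <!-- Illustrative sketch (hand-written, not JDiff output): per the class doc above,
+ SecondaryNameNode is constructed with a Configuration and implements Runnable,
+ so an embedding process could run the checkpoint loop in its own thread
+ (standalone deployments would normally just invoke main()):
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.dfs.SecondaryNameNode;
+
+ Configuration conf = new Configuration();
+ SecondaryNameNode secondary = new SecondaryNameNode(conf); // throws IOException; connects to the primary
+ new Thread(secondary).start(); // run() wakes up on the configured schedule
+ // ... later, when stopping:
+ secondary.shutdown(); // returns only after shutdown is complete
+ -->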
+ <!-- start class org.apache.hadoop.dfs.SecondaryNameNode.GetImageServlet -->
+ <class name="SecondaryNameNode.GetImageServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SecondaryNameNode.GetImageServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in the Namesystem's Jetty server to retrieve a file.
+ Typically used by the Secondary NameNode to retrieve the image and
+ edits files for periodic checkpointing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.SecondaryNameNode.GetImageServlet -->
+ <!-- start class org.apache.hadoop.dfs.StreamFile -->
+ <class name="StreamFile" extends="org.apache.hadoop.dfs.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StreamFile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.StreamFile -->
+ <!-- start interface org.apache.hadoop.dfs.Upgradeable -->
+ <interface name="Upgradeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable&lt;org.apache.hadoop.dfs.Upgradeable&gt;"/>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the layout version of the upgrade object.
+ @return layout version]]>
+ </doc>
+ </method>
+ <method name="getType" return="org.apache.hadoop.dfs.FSConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of the software component that this object is upgrading.
+ @return type]]>
+ </doc>
+ </method>
+ <method name="getDescription" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Description of the upgrade object for displaying.
+ @return description]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Upgrade status reports the percentage of the work done out of the total
+ amount required by the upgrade.
+
+ 100% means that the upgrade is completed.
+ Any value < 100 means it is not complete.
+
+ Implementations should report at least 2 distinct values, e.g. 0 and 100.
+ @return integer value in the range [0, 100].]]>
+ </doc>
+ </method>
+ <method name="startUpgrade" return="org.apache.hadoop.dfs.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Prepare for the upgrade.
+ E.g. initialize upgrade data structures and set status to 0.
+
+ Returns an upgrade command that is used for broadcasting to other cluster
+ components.
+ E.g. name-node informs data-nodes that they must perform a distributed upgrade.
+
+ @return an UpgradeCommand for broadcasting.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="completeUpgrade" return="org.apache.hadoop.dfs.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete upgrade.
+ E.g. cleanup upgrade data structures or write metadata to disk.
+
+ Returns an upgrade command that is used for broadcasting to other cluster
+ components.
+ E.g. data-nodes inform the name-node that they completed the upgrade
+ while other data-nodes are still upgrading.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatusReport" return="org.apache.hadoop.dfs.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status report for the upgrade.
+
+ @param details true if upgradeStatus details need to be included,
+ false otherwise
+ @return {@link UpgradeStatusReport}
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Common interface for distributed upgrade objects.
+
+ Each upgrade object corresponds to a layout version,
+ which is the latest version that should be upgraded using this object.
+ That is, all components whose layout version is greater than or equal to the
+ one returned by {@link #getVersion()} must be upgraded with this object.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.Upgradeable -->
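+ <!-- Illustrative sketch (hand-written, not JDiff output) of the Upgradeable
+ contract described above. UpgradeCommand, FSConstants.NodeType and
+ UpgradeStatusReport are taken from this file; their visibility outside
+ org.apache.hadoop.dfs, the NAME_NODE enum constant and the -13 layout
+ version are assumptions:
+
+ import java.io.IOException;
+
+ class ExampleUpgrade implements Upgradeable {
+   private short status = 0; // percent complete, in [0, 100]
+   public int getVersion() { return -13; } // hypothetical layout version
+   public FSConstants.NodeType getType() { return FSConstants.NodeType.NAME_NODE; }
+   public String getDescription() { return "Example distributed upgrade"; }
+   public short getUpgradeStatus() { return status; }
+   public UpgradeCommand startUpgrade() throws IOException {
+     status = 0; // initialize upgrade data structures
+     return null; // or a command to broadcast to other cluster components
+   }
+   public UpgradeCommand completeUpgrade() throws IOException {
+     status = 100; // write metadata to disk, clean up
+     return null;
+   }
+   public UpgradeStatusReport getUpgradeStatusReport(boolean details)
+       throws IOException {
+     return new UpgradeStatusReport(getVersion(), status, status == 100);
+   }
+   public int compareTo(Upgradeable o) { // required by Comparable<Upgradeable>
+     return getVersion() - o.getVersion();
+   }
+ }
+ -->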
+ <!-- start class org.apache.hadoop.dfs.UpgradeStatusReport -->
+ <class name="UpgradeStatusReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="UpgradeStatusReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UpgradeStatusReport" type="int, short, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the layout version of the currently running upgrade.
+ @return layout version]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the upgradeStatus as a percentage of the total upgrade done.
+
+ @see Upgradeable#getUpgradeStatus()]]>
+ </doc>
+ </method>
+ <method name="isFinalized" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is current upgrade finalized.
+ @return true if finalized or false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getStatusText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <doc>
+ <![CDATA[Get upgradeStatus data as text for reporting.
+ Should be overridden to report upgrade-specific upgradeStatus data.
+
+ @param details true if upgradeStatus details need to be included,
+ false otherwise
+ @return text]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print basic upgradeStatus details.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="version" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="upgradeStatus" type="short"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="finalized" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Base upgrade upgradeStatus class.
+ Subclass it if specific status fields need to be reported.
+
+ Describes the status of the current upgrade.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.UpgradeStatusReport -->
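+ <!-- Illustrative sketch (hand-written, not JDiff output): UpgradeStatusReport
+ implements org.apache.hadoop.io.Writable, so it round-trips through
+ DataOutput/DataInput using the constructors and methods listed above.
+ The field values are arbitrary:
+
+ import java.io.*;
+
+ UpgradeStatusReport report = new UpgradeStatusReport(-13, (short) 42, false);
+ ByteArrayOutputStream buf = new ByteArrayOutputStream();
+ report.write(new DataOutputStream(buf)); // serialize
+
+ UpgradeStatusReport copy = new UpgradeStatusReport(); // no-arg constructor
+ copy.readFields(new DataInputStream(
+     new ByteArrayInputStream(buf.toByteArray()))); // deserialize
+ System.out.println(copy); // toString() prints basic upgradeStatus details
+ -->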
+ <doc>
+ <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
+Google's <a href="http://labs.google.com/papers/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files
+have strictly one writer at any one time. Bytes are always appended
+to the end of the writer's stream. There is no notion of "record appends"
+or "mutations" that are then checked or reordered. Writers simply emit
+a byte stream. That byte stream is guaranteed to be stored in the
+order written.</p>]]>
+ </doc>
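+ <!-- Illustrative sketch (hand-written, not JDiff output) of the single-writer
+ model described above: one client holds the sole output stream and simply
+ emits bytes, which HDFS stores in the order written. The path is hypothetical:
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.*;
+
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.get(conf); // the distributed implementation
+ FSDataOutputStream out = fs.create(new Path("/tmp/example.txt"));
+ out.writeBytes("bytes are appended in order\n"); // no record appends or mutations
+ out.close(); // the byte stream is stored exactly as written
+ -->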
+</package>
+<package name="org.apache.hadoop.dfs.datanode.metrics">
+ <!-- start class org.apache.hadoop.dfs.datanode.metrics.DataNodeMetrics -->
+ <class name="DataNodeMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="DataNodeMetrics" type="org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="bytesWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bytesRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksReplicated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksRemoved" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksVerified" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockVerificationFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readsFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readsFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writesFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writesFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writeBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readMetadataOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="copyBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="replaceBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="heartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockReports" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various DataNode statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #blocksRead}.inc()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.datanode.metrics.DataNodeMetrics -->
+ <!-- start class org.apache.hadoop.dfs.datanode.metrics.DataNodeStatistics -->
+ <class name="DataNodeStatistics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean"/>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shuts down the statistics and unregisters the MBean.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlocksRead" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlocksRemoved" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlocksReplicated" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlocksWritten" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockVerificationFailures" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlocksVerified" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadsFromLocalClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadsFromRemoteClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWritesFromLocalClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWritesFromRemoteClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.datanode.metrics.DataNodeStatistics -->
+ <!-- start interface org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean -->
+ <interface name="DataNodeStatisticsMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getBytesRead" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of bytes read in the last interval
+ @return number of bytes read]]>
+ </doc>
+ </method>
+ <method name="getBlocksWritten" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of blocks written in the last interval
+ @return number of blocks written]]>
+ </doc>
+ </method>
+ <method name="getBlocksRead" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of blocks read in the last interval
+ @return number of blocks read]]>
+ </doc>
+ </method>
+ <method name="getBlocksReplicated" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of blocks replicated in the last interval
+ @return number of blocks replicated]]>
+ </doc>
+ </method>
+ <method name="getBlocksRemoved" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of blocks removed in the last interval
+ @return number of blocks removed]]>
+ </doc>
+ </method>
+ <method name="getBlocksVerified" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of blocks verified in the last interval
+ @return number of blocks verified]]>
+ </doc>
+ </method>
+ <method name="getBlockVerificationFailures" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of block verification failures in the last interval
+ @return number of block verification failures]]>
+ </doc>
+ </method>
+ <method name="getReadsFromLocalClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of reads from local clients in the last interval
+ @return number of reads from local clients]]>
+ </doc>
+ </method>
+ <method name="getReadsFromRemoteClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of reads from remote clients in the last interval
+ @return number of reads from remote clients]]>
+ </doc>
+ </method>
+ <method name="getWritesFromLocalClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of writes from local clients in the last interval
+ @return number of writes from local clients]]>
+ </doc>
+ </method>
+ <method name="getWritesFromRemoteClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of writes from remote clients in the last interval
+ @return number of writes from remote clients]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of ReadBlock operations in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for ReadBlock Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum ReadBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum ReadBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of WriteBlock operations in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for WriteBlock Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum WriteBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum WriteBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of ReadMetadata operations in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for ReadMetadata Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum ReadMetadata Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum ReadMetadata Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of CopyBlock operations in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for CopyBlock Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum CopyBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum CopyBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of ReplaceBlock operations in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for ReplaceBlock Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum ReplaceBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum ReplaceBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Block Reports sent in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for Block Report Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum Block Reports Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum Block Reports Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Heartbeat operations in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for Heartbeat Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum Heartbeat Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum Heartbeat Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all min max times]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX interface for the runtime statistics for the data node.
+ Many of the statistics are sampled and averaged on an interval
+ which can be specified in the config file.
+ <p>
+ For the statistics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most do.
+ The default Null metrics context, however, does NOT. So if you aren't
+ using any other metrics context, you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ dfs.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically.
+ <p>
+ Data Node storage (FSDataset) status info is reported in another MBean
+ @see org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean -->
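+ <!-- Illustrative sketch (hand-written, not JDiff output): reading these
+ statistics over JMX. Attribute names follow the standard JMX getter
+ convention (getBytesRead becomes "BytesRead"); the ObjectName is a
+ hypothetical placeholder, since this file does not say how the data node
+ registers the bean. Checked exceptions are elided:
+
+ import java.lang.management.ManagementFactory;
+ import javax.management.*;
+
+ MBeanServer server = ManagementFactory.getPlatformMBeanServer();
+ ObjectName name = new ObjectName(
+     "hadoop.dfs:service=DataNode,name=DataNodeStatistics"); // hypothetical
+ int bytesRead = (Integer) server.getAttribute(name, "BytesRead");
+ long avgReadMs = (Long) server.getAttribute(name, "ReadBlockOpAverageTime");
+ -->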
+ <!-- start interface org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean -->
+ <interface name="FSDatasetMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the total space (in bytes) used by dfs datanode
+ @return the total space used by dfs datanode
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns total capacity (in bytes) of storage (used and unused)
+ @return total capacity of storage (used and unused)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the amount of free storage space (in bytes)
+ @return The amount of free storage space
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getStorageInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the storage id of the underlying storage]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface defines the methods to get the status of the FSDataset of
+ a data node.
+ It is also used for publishing via JMX (hence we follow the JMX naming
+ convention.)
+ <p>
+ Data Node runtime statistics are reported in another MBean
+ @see org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean -->
+</package>
+<package name="org.apache.hadoop.dfs.namenode.metrics">
+ <!-- start interface org.apache.hadoop.dfs.namenode.metrics.FSNamesystemMBean -->
+ <interface name="FSNamesystemMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getFSState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The state of the file system: Safemode or Operational
+ @return the state]]>
+ </doc>
+ </method>
+ <method name="getBlocksTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of allocated blocks in the system
+ @return - number of allocated blocks]]>
+ </doc>
+ </method>
+ <method name="getCapacityTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total storage capacity
+ @return - total capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacityRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Free (unused) storage capacity
+ @return - free capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Used storage capacity
+ @return - used capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getFilesTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total number of files and directories
+ @return - num of files and directories]]>
+ </doc>
+ </method>
+ <method name="numLiveDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Live data nodes
+ @return number of live data nodes]]>
+ </doc>
+ </method>
+ <method name="numDeadDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of dead data nodes
+ @return number of dead data nodes]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface defines the methods to get the status of the FSNamesystem of
+ a name node.
+ It is also used for publishing via JMX (hence we follow the JMX naming
+ convention.)
+
+ <p>
+ Name Node runtime statistics are reported in another MBean
+ @see org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.namenode.metrics.FSNamesystemMBean -->
+ <!-- start class org.apache.hadoop.dfs.namenode.metrics.NameNodeStatistics -->
+ <class name="NameNodeStatistics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean"/>
+ <constructor name="NameNodeStatistics" type="org.apache.hadoop.dfs.NameNodeMetrics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constructs and registers the NameNodeStatisticsMBean
+ @param nameNodeMetrics - the metrics from which the mbean gets its info]]>
+ </doc>
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shuts down the statistics and unregisters the MBean.]]>
+ </doc>
+ </method>
+ <method name="getBlockReportAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockReportMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockReportMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockReportNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getSafemodeTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getFSImageLoadTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getNumFilesCreated" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getNumFilesListed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getNumGetBlockLocations" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getNumFilesRenamed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the implementation of the Name Node JMX MBean]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.namenode.metrics.NameNodeStatistics -->
+ <!-- start interface org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean -->
+ <interface name="NameNodeStatisticsMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getSafemodeTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The time spent in the Safemode at startup
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getFSImageLoadTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Time spent loading the FS Image at startup
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Journal Transactions in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for Journal transactions in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum Journal Transaction Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum Journal Transaction Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of block Reports processed in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getBlockReportAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for Block Report Processing in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum Block Report Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum Block Report Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Journal Syncs in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for Journal Sync in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum Journal Sync Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum Journal Sync Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all min max times]]>
+ </doc>
+ </method>
+ <method name="getNumFilesCreated" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of files created in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getNumGetBlockLocations" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of calls to
+ {@link org.apache.hadoop.dfs.NameNode#getBlockLocations(String,long,long)}
+ in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getNumFilesRenamed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of files renamed in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getNumFilesListed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of files listed in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for getting runtime statistics of
+ the name node.
+ Many of the statistics are sampled and averaged on an interval
+ which can be specified in the config file.
+ <p>
+ For the statistics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most contexts do;
+ the default Null metrics context, however, does not. So if you aren't
+ using any other metrics context, you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ dfs.period=10
+ </pre>
+ <p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically.
+ <p>
+ Name node status info is reported in another MBean.
+ @see org.apache.hadoop.dfs.namenode.metrics.FSNamesystemMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean -->
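+ <!-- A minimal JMX read sketch for this interface, not part of the generated API
+      record. The ObjectName used here is an assumption for illustration; the real
+      name is chosen when the NameNode registers the bean, so query the server for
+      it if this one does not match. Run inside the NameNode JVM, or adapt it to
+      use a remote JMXConnector.
+
+      import java.lang.management.ManagementFactory;
+      import javax.management.MBeanServer;
+      import javax.management.ObjectName;
+
+      public class NameNodeStatsProbe {
+        public static void main(String[] args) throws Exception {
+          MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+          // Hypothetical name; list candidates with mbs.queryNames(null, null).
+          ObjectName name =
+              new ObjectName("hadoop:service=NameNode,name=NameNodeStatistics");
+          // JMX attribute names mirror the getters above without the "get" prefix.
+          System.out.println("SafemodeTime = "
+              + mbs.getAttribute(name, "SafemodeTime"));
+          System.out.println("JournalSyncAverageTime = "
+              + mbs.getAttribute(name, "JournalSyncAverageTime"));
+        }
+      }
+ -->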
+</package>
+<package name="org.apache.hadoop.filecache">
+ <!-- start class org.apache.hadoop.filecache.DistributedCache -->
+ <class name="DistributedCache" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedCache"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache dir where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. If it is an archive
+ with a .zip or .jar extension it will be unzipped/unjarred automatically,
+ and the directory where the archive is unjarred is returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to the directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache dir where you want to localize the files/archives
+ @param isArchive if the cache is an archive or a file. If it is an archive
+ with a .zip or .jar extension it will be unzipped/unjarred automatically,
+ and the directory where the archive is unjarred is returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to the directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="releaseCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is the opposite of getLocalCache. When you are done
+ using the cache, you need to release it.
+ @param cache The cache URI to be released
+ @param conf configuration which contains the filesystem the cache
+ is contained in.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeRelative" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTimestamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="cache" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns mtime of a given cache file on hdfs.
+ @param conf configuration
+ @param cache cache file
+ @return mtime of a given cache file on hdfs
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createAllSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="jobCacheDir" type="java.io.File"/>
+ <param name="workDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method creates symlinks for all files in a given dir in another directory
+ @param conf the configuration
+ @param jobCacheDir the target directory for creating symlinks
+ @param workDir the directory in which the symlinks are created
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setCacheArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archives" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of archives
+ @param archives The list of archives that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="setCacheFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of files
+ @param files The list of files that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="getCacheArchives" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache archives set in the Configuration
+ @param conf The configuration which contains the archives
+ @return A URI array of the caches set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCacheFiles" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache files set in the Configuration
+ @param conf The configuration which contains the files
+ @return A URI array of the files set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheArchives" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized caches
+ @param conf Configuration that contains the localized archives
+ @return A path array of localized caches
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized files
+ @param conf Configuration that contains the localized files
+ @return A path array of localized files
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getArchiveTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the archives
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFileTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the files
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setArchiveTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamps of the archives to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of archives.
+ The order should be the same as the order in which the archives are added.]]>
+ </doc>
+ </method>
+ <method name="setFileTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamps of the files to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of files.
+ The order should be the same as the order in which the files are added.]]>
+ </doc>
+ </method>
+ <method name="setLocalArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized archives
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+ </doc>
+ </method>
+ <method name="setLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized files
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+ </doc>
+ </method>
+ <method name="addCacheArchive"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add an archive to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addCacheFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add a file to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addFileToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a file path to the current set of classpath entries. It adds the file
+ to the cache as well.
+
+ @param file Path of the file to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getFileClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the file entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="addArchiveToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archive" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an archive path to the current set of classpath entries. It adds the
+ archive to cache as well.
+
+ @param archive Path of the archive to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getArchiveClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the archive entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method allows you to create symlinks in the current working directory
+ of the task to all the cache files/archives
+ @param conf the jobconf]]>
+ </doc>
+ </method>
+ <method name="getSymlink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method checks to see if symlinks are to be created for the
+ localized cache files in the current working directory
+ @param conf the jobconf
+ @return true if symlinks are to be created; false otherwise]]>
+ </doc>
+ </method>
+ <method name="checkURIs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uriFiles" type="java.net.URI[]"/>
+ <param name="uriArchives" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[This method checks if there is a conflict in the fragment names
+ of the URIs. It also makes sure that each URI has a fragment. It
+ is only to be called if you want to create symlinks for
+ the various archives and files.
+ @param uriFiles The array of file URIs
+ @param uriArchives The array of archive URIs]]>
+ </doc>
+ </method>
+ <method name="purgeCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clear the entire contents of the cache and delete the backing files. This
+ should only be used when the server is reinitializing, because the users
+ are going to lose their files.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Distribute application-specific large, read-only files efficiently.
+
+ <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ framework to cache files (text, archives, jars etc.) needed by applications.
+ </p>
+
+ <p>Applications specify the files, via urls (hdfs:// or http://) to be cached
+ via the {@link JobConf}. The <code>DistributedCache</code> assumes that the
+ files specified via hdfs:// urls are already present on the
+ {@link FileSystem} at the path specified by the url.</p>
+
+ <p>The framework will copy the necessary files on to the slave node before
+ any tasks for the job are executed on that node. Its efficiency stems from
+ the fact that the files are only copied once per job and the ability to
+ cache archives which are un-archived on the slaves.</p>
+
+ <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ data/text files and/or more complex types such as archives, jars etc.
+ Archives (zip files) are un-archived at the slave nodes. Jars may be
+ optionally added to the classpath of the tasks, a rudimentary software
+ distribution mechanism. Files have execution permissions. Optionally users
+ can also direct it to symlink the distributed cache file(s) into
+ the working directory of the task.</p>
+
+ <p><code>DistributedCache</code> tracks modification timestamps of the cache
+ files. Clearly the cache files should not be modified by the application
+ or externally while the job is executing.</p>
+
+ <p>Here is an illustrative example on how to use the
+ <code>DistributedCache</code>:</p>
+ <p><blockquote><pre>
+ // Setting up the cache for the application
+
+ 1. Copy the requisite files to the <code>FileSystem</code>:
+
+ $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
+ $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
+ $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+
+ 2. Setup the application's <code>JobConf</code>:
+
+ JobConf job = new JobConf();
+ DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
+ job);
+ DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
+ DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+
+ 3. Use the cached files in the {@link Mapper} or {@link Reducer}:
+
+ public static class MapClass extends MapReduceBase
+ implements Mapper&lt;K, V, K, V&gt; {
+
+ private Path[] localArchives;
+ private Path[] localFiles;
+
+ public void configure(JobConf job) {
+ // Get the cached archives/files
+ localArchives = DistributedCache.getLocalCacheArchives(job);
+ localFiles = DistributedCache.getLocalCacheFiles(job);
+ }
+
+ public void map(K key, V value,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Use data from the cached archives/files here
+ // ...
+ // ...
+ output.collect(k, v);
+ }
+ }
+
+ </pre></blockquote></p>
+
+ @see JobConf
+ @see JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.filecache.DistributedCache -->
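+ <!-- A minimal driver-side sketch of symlink setup with this class, assuming the
+      files below already exist on the default FileSystem; every path and fragment
+      name is illustrative only.
+
+      import java.net.URI;
+      import org.apache.hadoop.filecache.DistributedCache;
+      import org.apache.hadoop.mapred.JobConf;
+
+      public class CacheSetupSketch {
+        public static void main(String[] args) throws Exception {
+          JobConf job = new JobConf();
+          URI[] files = { new URI("/myapp/lookup.dat#lookup.dat") };
+          URI[] archives = { new URI("/myapp/map.zip#map") };
+          // Every URI needs a distinct fragment before symlinks can be created.
+          if (!DistributedCache.checkURIs(files, archives)) {
+            throw new IllegalArgumentException("duplicate or missing URI fragments");
+          }
+          DistributedCache.setCacheFiles(files, job);
+          DistributedCache.setCacheArchives(archives, job);
+          DistributedCache.createSymlink(job); // symlink caches into the task cwd
+        }
+      }
+ -->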
+</package>
+<package name="org.apache.hadoop.fs">
+ <!-- start class org.apache.hadoop.fs.BlockLocation -->
+ <class name="BlockLocation" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlockLocation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with host, name, offset and length]]>
+ </doc>
+ </constructor>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hosts (hostname) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of names (hostname:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the length of the block]]>
+ </doc>
+ </method>
+ <method name="setOffset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <doc>
+ <![CDATA[Set the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="setLength"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="length" type="long"/>
+ <doc>
+ <![CDATA[Set the length of block]]>
+ </doc>
+ </method>
+ <method name="setHosts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hosts" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the hosts hosting this block]]>
+ </doc>
+ </method>
+ <method name="setNames"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the names (host:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement write of Writable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement readFields of Writable]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BlockLocation -->
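+ <!-- A small sketch of building a BlockLocation by hand, e.g. for a test fixture.
+      Using the default constructor plus the setters sidesteps any ambiguity in the
+      four-argument constructor's parameter order; host and port values are made up.
+
+      import org.apache.hadoop.fs.BlockLocation;
+
+      public class BlockLocationSketch {
+        public static void main(String[] args) throws Exception {
+          BlockLocation loc = new BlockLocation();
+          loc.setHosts(new String[] { "datanode1" });       // hostnames
+          loc.setNames(new String[] { "datanode1:50010" }); // hostname:port pairs
+          loc.setOffset(0L);                 // block starts at file offset 0
+          loc.setLength(64L * 1024 * 1024);  // one 64 MB block
+          System.out.println(loc);           // toString() summarizes the location
+        }
+      }
+ -->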
+ <!-- start class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <class name="BufferedFSInputStream" extends="java.io.BufferedInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="BufferedFSInputStream" type="org.apache.hadoop.fs.FSInputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a <code>BufferedFSInputStream</code>
+ with the specified buffer size,
+ and saves its argument, the input stream
+ <code>in</code>, for later use. An internal
+ buffer array of length <code>size</code>
+ is created and stored in <code>buf</code>.
+
+ @param in the underlying input stream.
+ @param size the buffer size.
+ @exception IllegalArgumentException if size <= 0.]]>
+ </doc>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A class that optimizes reading from FSInputStream by buffering]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <!-- start class org.apache.hadoop.fs.ChecksumException -->
+ <class name="ChecksumException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumException" type="java.lang.String, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Thrown for checksum errors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumException -->
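+ <!-- A sketch of handling a ChecksumException: the exception carries the stream
+      position where the corrupt data begins, exposed via getPos(). The throwing
+      method here only simulates a failed read.
+
+      import org.apache.hadoop.fs.ChecksumException;
+
+      public class ChecksumHandlingSketch {
+        static void readBlock() throws ChecksumException {
+          throw new ChecksumException("Checksum error", 4096L);
+        }
+        public static void main(String[] args) {
+          try {
+            readBlock();
+          } catch (ChecksumException e) {
+            System.err.println("bad data starts at byte " + e.getPos());
+          }
+        }
+      }
+ -->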
+ <!-- start class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <class name="ChecksumFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getApproxChkSumLength" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getRawFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the raw file system]]>
+ </doc>
+ </method>
+ <method name="getChecksumFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return the name of the checksum file associated with a file.]]>
+ </doc>
+ </method>
+ <method name="isChecksumFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return true iff file is a checksum file name.]]>
+ </doc>
+ </method>
+ <method name="getChecksumFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileSize" type="long"/>
+ <doc>
+ <![CDATA[Return the length of the checksum file given the size of the
+ actual file.]]>
+ </doc>
+ </method>
+ <method name="getBytesPerSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the bytes per checksum]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="getChecksumLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ <param name="bytesPerSum" type="int"/>
+ <doc>
+ <![CDATA[Calculate the length of the checksum file in bytes.
+ @param size the length of the data file in bytes
+ @param bytesPerSum the number of bytes in a checksum block
+ @return the number of bytes in the checksum file]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename files/dirs]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement the delete(Path, boolean) in checksum
+ file system.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="copyCrc" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ If src and dst are directories, the copyCrc parameter
+ determines whether to copy CRC files.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Report a checksum error to the file system.
+ @param f the file name containing the error
+ @param in the stream open on the file
+ @param inPos the position of the beginning of the bad data in the file
+ @param sums the stream open on the checksum file
+ @param sumsPos the position of the beginning of the bad data in the checksum file
+ @return true if retry is necessary]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract checksummed FileSystem.
+ It provides a basic implementation of a checksummed FileSystem,
+ which creates a checksum file for each raw file.
+ It generates and verifies checksums at the client side.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumFileSystem -->
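+ <!-- A sketch of the static checksum-sizing helpers above. The 512-byte
+      bytesPerSum value is an assumed example; a real value comes from
+      getBytesPerSum() on a concrete checksummed filesystem.
+
+      import org.apache.hadoop.fs.ChecksumFileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class ChecksumSizingSketch {
+        public static void main(String[] args) {
+          long dataSize = 1024L * 1024L; // a 1 MB data file
+          int bytesPerSum = 512;         // assumed checksum block size
+          System.out.println("checksum file length: "
+              + ChecksumFileSystem.getChecksumLength(dataSize, bytesPerSum));
+          System.out.println("approximate length: "
+              + ChecksumFileSystem.getApproxChkSumLength(dataSize));
+          // Checksum files are stored beside the data as hidden ".name.crc" files.
+          System.out.println(ChecksumFileSystem.isChecksumFile(new Path(".part.crc")));
+        }
+      }
+ -->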
+ <!-- start class org.apache.hadoop.fs.ContentSummary -->
+ <class name="ContentSummary" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ContentSummary"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the length]]>
+ </doc>
+ </method>
+ <method name="getDirectoryCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the directory count]]>
+ </doc>
+ </method>
+ <method name="getFileCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the file count]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store the summary of a content (a directory or a file).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ContentSummary -->
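+ <!-- A sketch of ContentSummary as a plain value object. The three-argument
+      constructor is assumed to take (length, fileCount, directoryCount), matching
+      the order suggested by the getters above.
+
+      import org.apache.hadoop.fs.ContentSummary;
+
+      public class ContentSummarySketch {
+        public static void main(String[] args) {
+          ContentSummary summary = new ContentSummary(4096L, 3L, 1L);
+          System.out.println("length      = " + summary.getLength());
+          System.out.println("files       = " + summary.getFileCount());
+          System.out.println("directories = " + summary.getDirectoryCount());
+          System.out.println(summary); // Writable value with a printable toString()
+        }
+      }
+ -->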
+ <!-- start class org.apache.hadoop.fs.DF -->
+ <class name="DF" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DF" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="DF" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFilesystem" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAvailable" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPercentUsed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMount" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DF_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'df' program.
+ Tested on Linux, FreeBSD, Cygwin.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DF -->
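+ <!-- A sketch that shells out to 'df' for the filesystem holding a directory.
+      DF_INTERVAL_DEFAULT throttles how often the command is re-run; /tmp is an
+      arbitrary example path.
+
+      import java.io.File;
+      import org.apache.hadoop.fs.DF;
+
+      public class DfSketch {
+        public static void main(String[] args) throws Exception {
+          DF df = new DF(new File("/tmp"), DF.DF_INTERVAL_DEFAULT);
+          System.out.println("filesystem " + df.getFilesystem()
+              + " mounted on " + df.getMount());
+          System.out.println("capacity=" + df.getCapacity()
+              + " used=" + df.getUsed()
+              + " available=" + df.getAvailable()
+              + " (" + df.getPercentUsed() + "% used)");
+        }
+      }
+ -->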
+ <!-- start class org.apache.hadoop.fs.DU -->
+ <class name="DU" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DU" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="DU" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="decDfsUsed"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ </method>
+ <method name="incDfsUsed"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'du' program.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DU -->
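+ <!-- The matching sketch for 'du'. The 10-second refresh interval and the /tmp
+      path are arbitrary example values.
+
+      import java.io.File;
+      import org.apache.hadoop.fs.DU;
+
+      public class DuSketch {
+        public static void main(String[] args) throws Exception {
+          DU du = new DU(new File("/tmp"), 10000L); // re-query at most every 10 s
+          System.out.println(du.getDirPath() + " uses " + du.getUsed() + " bytes");
+        }
+      }
+ -->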
+ <!-- start class org.apache.hadoop.fs.FileStatus -->
+ <class name="FileStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <constructor name="FileStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a directory?
+ @return true if this is a directory]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the block size of the file.
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replication factor of a file.
+ @return the replication factor of a file.]]>
+ </doc>
+ </method>
+ <method name="getModificationTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the modification time of the file.
+ @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get FsPermission associated with the file.
+ @return permission. If a filesystem does not have a notion of permissions,
+ or if permissions could not be determined, then the default
+ permission equivalent of "rwxrwxrwx" is returned.]]>
+ </doc>
+ </method>
+ <method name="getOwner" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the owner of the file.
+ @return owner of the file. The string could be empty if there is no
+ notion of owner of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getGroup" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the group associated with the file.
+ @return group for the file. The string could be empty if there is no
+ notion of group of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Sets permission.
+ @param permission if permission is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="owner" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets owner.
+ @param owner if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setGroup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets group.
+ @param group if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare this object to another object
+
+ @param o the object to be compared.
+ @return a negative integer, zero, or a positive integer as this object
+ is less than, equal to, or greater than the specified object.
+
+ @throws ClassCastException if the specified object is not of
+ type FileStatus]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare this object with another object for equality.
+ @param o the object to be compared.
+ @return true if the two file statuses have the same path name; false if not.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for the object, which is defined as
+ the hash code of the path name.
+
+ @return a hash code value for the path name.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Class that represents the client-side information for a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileStatus -->
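+ <!-- Usage sketch for FileStatus via the six-argument constructor listed
+      above; the argument order (length, isdir, block replication, block
+      size, modification time, path) is inferred from the parameter types,
+      and the path below is an invented example.
+
+        import org.apache.hadoop.fs.FileStatus;
+        import org.apache.hadoop.fs.Path;
+
+        public class FileStatusSketch {
+          public static void main(String[] args) {
+            FileStatus st = new FileStatus(1024L, false, 3, 64L * 1024 * 1024,
+                System.currentTimeMillis(), new Path("/user/alice/input.txt"));
+            // Read back the client-side metadata carried by the object.
+            System.out.println(st.getPath() + ": " + st.getLen() + " bytes, "
+                + "replication " + st.getReplication() + ", dir? " + st.isDir());
+          }
+        }
+ -->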
+ <!-- start class org.apache.hadoop.fs.FileSystem -->
+ <class name="FileSystem" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="FileSystem"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseArgs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="i" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the cmd-line args, starting at i. Remove consumed args
+ from the array. We expect a parameter of the form:
+ '-local | -dfs <namenode:port>']]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the configured filesystem implementation.]]>
+ </doc>
+ </method>
+ <method name="getDefaultUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default filesystem URI from a configuration.
+ @param conf the configuration to access
+ @return the uri of the default filesystem]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.net.URI"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="getNamed" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="call #get(URI,Configuration) instead.">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated call #get(URI,Configuration) instead.]]>
+ </doc>
+ </method>
+ <method name="getLocal" return="org.apache.hadoop.fs.LocalFileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the local file system.
+ @param conf the configuration to configure the file system with
+ @return a LocalFileSystem]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the FileSystem for this URI's scheme and authority. The scheme
+ of the URI determines a configuration property name,
+ <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
+ The entire URI is passed to the FileSystem instance's initialize method.]]>
+ </doc>
+ </method>
+ <method name="closeAll"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all cached filesystems. Be sure those filesystems are not
+ used anymore.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a file with the provided permission.
+ The permission of the file is set to the provided permission, as in
+ setPermission, not permission&~umask.
+
+ It is implemented using two RPCs. It is understood that this is inefficient,
+ but the implementation is thread-safe. The other option is to change the
+ value of umask in the configuration to 0, but that is not thread-safe.
+
+ @param fs file system handle
+ @param file the name of the file to be created
+ @param permission the permission of the file
+ @return an output stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a directory with the provided permission.
+ The permission of the directory is set to the provided permission, as in
+ setPermission, not permission&~umask.
+
+ @see #create(FileSystem, Path, FsPermission)
+
+ @param fs file system handle
+ @param dir the name of the directory to be created
+ @param permission the permission of the directory
+ @return true if the directory creation succeeds; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileCacheHints" return="java.lang.String[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileBlockLocations() instead
+
+ Return a 2D array of size 1x1 or greater, containing hostnames
+ where portions of the given file can be found. For a nonexistent
+ file or region, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The FileSystem will simply return an element containing 'localhost'.">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileBlockLocations() instead
+
+ Return a 2D array of size 1x1 or greater, containing hostnames
+ where portions of the given file can be found. For a nonexistent
+ file or region, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The FileSystem will simply return an element containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing the hostnames, offset and size of
+ portions of the given file. For a nonexistent
+ file or region, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The FileSystem will simply return an element containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file to open]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param permission
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize
+ @param progress
+ @throws IOException
+ @see #setPermission(Path, FsPermission)]]>
+ </doc>
+ </method>
+ <method name="createNewFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the given Path as a brand-new zero-length file. Returns
+ false if creation fails or if the file already exists.]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get replication.
+
+ @deprecated Use getFileStatus() instead
+ @param src file name
+ @return file replication
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file.
+
+ @param f the path to delete.
+ @param recursive if the path is a directory and recursive is set to
+ true, the directory is deleted; otherwise an exception is thrown. In
+ the case of a file, recursive can be set to either true or false.
+ @return true if the delete is successful; false otherwise.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check whether the given path exists.
+ @param f source file]]>
+ </doc>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[True iff the named path is a regular file.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getContentLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #getContentSummary(Path)}.">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the number of bytes of the given path
+ If <i>f</i> is a file, return the size of the file;
+ If <i>f</i> is a directory, return the size of the directory tree
+ @deprecated Use {@link #getContentSummary(Path)}.]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the {@link ContentSummary} of a given {@link Path}.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given path using the user-supplied path
+ filter.
+
+ @param f
+ a path name
+ @param filter
+ the user-supplied path filter
+ @return an array of FileStatus objects for the files under the given path
+ after applying the filter
+ @throws IOException
+ if encounter any problem while fetching the status]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using the default
+ path filter.
+
+ @param files
+ a list of paths
+ @return a list of statuses for the files under the given paths after
+ applying the default path filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using user-supplied
+ path filter.
+
+ @param files
+ a list of paths
+ @param filter
+ the user-supplied path filter
+ @return a list of statuses for the files under the given paths after
+ applying the filter
+ @exception IOException]]>
+ </doc>
+ </method>
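+ <!-- Usage sketch for the listStatus overloads above with a user-supplied
+      PathFilter; the directory name and '.txt' suffix are invented examples.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.FileStatus;
+        import org.apache.hadoop.fs.FileSystem;
+        import org.apache.hadoop.fs.Path;
+        import org.apache.hadoop.fs.PathFilter;
+
+        public class ListStatusSketch {
+          public static void main(String[] args) throws Exception {
+            FileSystem fs = FileSystem.get(new Configuration());
+            // List only the '.txt' entries under a hypothetical directory.
+            FileStatus[] txt = fs.listStatus(new Path("/user/alice"),
+                new PathFilter() {
+                  public boolean accept(Path p) {
+                    return p.getName().endsWith(".txt");
+                  }
+                });
+            for (FileStatus s : txt) {
+              System.out.println(s.getPath() + " " + s.getLen());
+            }
+          }
+        }
+ -->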
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Return all the files that match pathPattern and are not checksum
+ files. Results are sorted by their names.
+
+ <p>
+ A filename pattern is composed of <i>regular</i> characters and
+ <i>special pattern matching</i> characters, which are:
+
+ <dl>
+ <dd>
+ <dl>
+ <p>
+ <dt> <tt> ? </tt>
+ <dd> Matches any single character.
+
+ <p>
+ <dt> <tt> * </tt>
+ <dd> Matches zero or more characters.
+
+ <p>
+ <dt> <tt> [<i>abc</i>] </tt>
+ <dd> Matches a single character from character set
+ <tt>{<i>a,b,c</i>}</tt>.
+
+ <p>
+ <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
+ <dd> Matches a single character from the character range
+ <tt>{<i>a...b</i>}</tt>. Note that character <tt><i>a</i></tt> must be
+ lexicographically less than or equal to character <tt><i>b</i></tt>.
+
+ <p>
+ <dt> <tt> [^<i>a</i>] </tt>
+ <dd> Matches a single character that is not from character set or range
+ <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur
+ immediately to the right of the opening bracket.
+
+ <p>
+ <dt> <tt> \<i>c</i> </tt>
+ <dd> Removes (escapes) any special meaning of character <i>c</i>.
+
+ <p>
+ <dt> <tt> {ab,cd} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt>
+
+ <p>
+ <dt> <tt> {ab,c{de,fh}} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt>
+
+ </dl>
+ </dd>
+ </dl>
+
+ @param pathPattern a regular expression specifying a path pattern
+
+ @return an array of paths that match the path pattern
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array of FileStatus objects whose path names match pathPattern
+ and are accepted by the user-supplied path filter. Results are sorted by
+ their path names.
+ Return null if pathPattern has no glob and the path does not exist.
+ Return an empty array if pathPattern has a glob and no path matches it.
+
+ @param pathPattern
+ a regular expression specifying the path pattern
+ @param filter
+ a user-supplied path filter
+ @return an array of FileStatus objects
+ @throws IOException if any I/O error occurs when fetching file status]]>
+ </doc>
+ </method>
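+ <!-- Usage sketch for globStatus using the pattern language documented
+      above; the glob itself is an invented example.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.FileStatus;
+        import org.apache.hadoop.fs.FileSystem;
+        import org.apache.hadoop.fs.Path;
+
+        public class GlobSketch {
+          public static void main(String[] args) throws Exception {
+            FileSystem fs = FileSystem.get(new Configuration());
+            // Match part files under any 2010-11 dated directory. Per the
+            // doc above, null means a non-glob path that does not exist.
+            FileStatus[] matches = fs.globStatus(new Path("/logs/2010-11-*/part-*"));
+            if (matches != null) {
+              for (FileStatus m : matches) {
+                System.out.println(m.getPath());
+              }
+            }
+          }
+        }
+ -->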
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the current user's home directory in this filesystem.
+ The default implementation returns "/user/$USER/".]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param new_dir]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #mkdirs(Path, FsPermission)} with default permission.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make the given file and all non-existent parents into
+ directories. Has the semantics of Unix 'mkdir -p'.
+ Existence of the directory hierarchy is not an error.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to the FS at
+ the given dst name; the source is kept intact afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add them to the FS at
+ the given dst name, removing the sources afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name, removing the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add them to the FS at
+ the given dst name.
+ delSrc indicates whether the sources should be removed.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="moveToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ Remove the source afterwards]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when no more filesystem operations are needed.
+ Releases any held locks.]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total size of all files in the filesystem.]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should optimally
+ be split into to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a file status object that represents the path.
+ @param f The path we want information from
+ @return a FileStatus object
+ @throws FileNotFoundException when the path does not exist;
+ IOException see specific implementation]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permission of a path.
+ @param p
+ @param permission]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param p The path
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.]]>
+ </doc>
+ </method>
+ <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.FileSystem&gt;"/>
+ <doc>
+ <![CDATA[Get the statistics for a particular file system
+ @param cls the class to lookup
+ @return a statistics object]]>
+ </doc>
+ </method>
+ <method name="printStatistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="statistics" type="org.apache.hadoop.fs.FileSystem.Statistics"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The statistics for this file system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An abstract base class for a fairly generic filesystem. It
+ may be implemented as a distributed filesystem, or as a "local"
+ one that reflects the locally-connected disk. The local version
+ exists for small Hadoop instances and for testing.
+
+ <p>
+
+ All user code that may potentially use the Hadoop Distributed
+ File System should be written to use a FileSystem object. The
+ Hadoop DFS is a multi-machine system that appears as a single
+ disk. It's useful because of its fault tolerance and potentially
+ very large capacity.
+
+ <p>
+ The local implementation is {@link LocalFileSystem} and distributed
+ implementation is {@link DistributedFileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem -->
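+ <!-- Minimal end-to-end sketch of the FileSystem entry points listed above
+      (get, create, open, delete); the file name is an assumption, and the
+      default filesystem URI is taken from the Configuration.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.FSDataInputStream;
+        import org.apache.hadoop.fs.FSDataOutputStream;
+        import org.apache.hadoop.fs.FileSystem;
+        import org.apache.hadoop.fs.Path;
+
+        public class FsSketch {
+          public static void main(String[] args) throws Exception {
+            Configuration conf = new Configuration();
+            FileSystem fs = FileSystem.get(conf); // configured default filesystem
+            Path p = new Path("/tmp/hello.txt");
+            FSDataOutputStream out = fs.create(p, true); // overwrite if present
+            out.writeUTF("hello");
+            out.close();
+            FSDataInputStream in = fs.open(p);
+            System.out.println(in.readUTF());
+            in.close();
+            fs.delete(p, false); // non-recursive delete of a single file
+          }
+        }
+ -->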
+ <!-- start class org.apache.hadoop.fs.FileSystem.Statistics -->
+ <class name="FileSystem.Statistics" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="incrementBytesRead"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes read in the statistics
+ @param newBytes the additional bytes read]]>
+ </doc>
+ </method>
+ <method name="incrementBytesWritten"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes written in the statistics
+ @param newBytes the additional bytes written]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes read
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes written
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem.Statistics -->
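+ <!-- Sketch of reading per-FileSystem-class I/O counters via getStatistics
+      and printStatistics, as listed above; which counters are populated
+      depends on the filesystem in use.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.FileSystem;
+
+        public class StatsSketch {
+          public static void main(String[] args) throws Exception {
+            FileSystem fs = FileSystem.get(new Configuration());
+            FileSystem.Statistics stats = FileSystem.getStatistics(fs.getClass());
+            System.out.println("bytes read: " + stats.getBytesRead()
+                + ", bytes written: " + stats.getBytesWritten());
+            FileSystem.printStatistics(); // dump counters for every filesystem class
+          }
+        }
+ -->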
+ <!-- start class org.apache.hadoop.fs.FileUtil -->
+ <class name="FileUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus objects to an array of Paths.
+
+ @param stats
+ an array of FileStatus objects
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus objects to an array of Paths.
+ If stats is null, return path.
+ @param stats
+ an array of FileStatus objects
+ @param path
+ default path to return if stats is null
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="fullyDelete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a directory and all its contents. If
+ we return false, the directory may be partially deleted.]]>
+ </doc>
+ </method>
+ <method name="fullyDelete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recursively delete a directory.
+
+ @param fs {@link FileSystem} on which the path is present
+ @param dir directory to recursively delete
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copyMerge" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dstFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="addString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy all files in a directory to one output file (merge).]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy local files to a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="java.io.File"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy FileSystem files to local files.]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param filename The filename to convert
+ @return The Unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @return The Unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="getDU" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Takes an input dir and returns the disk usage (du) of that local directory.
+ Very basic implementation.
+
+ @param dir
+ The input dir whose disk space is to be computed
+ @return The total disk space of the input local directory]]>
+ </doc>
+ </method>
+ <method name="unZip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="unzipDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a File input, it will unzip the file into the unzip directory
+ passed as the second parameter.
+ @param inFile The zip file as input
+ @param unzipDir The directory into which the zip file is extracted.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="symLink" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="linkname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a soft link between a source and a destination,
+ only on a local disk. HDFS does not support this.
+ @param target the target of the symlink
+ @param linkname the symlink
+ @return the value returned by the command]]>
+ </doc>
+ </method>
+ <method name="chmod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="perm" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Change the permissions on a filename.
+ @param filename the name of the file to change
+ @param perm the permission string
+ @return the exit code from the command
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="createLocalTempFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="basefile" type="java.io.File"/>
+ <param name="prefix" type="java.lang.String"/>
+ <param name="isDeleteOnExit" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a tmp file for a base file.
+ @param basefile the base file of the tmp
+ @param prefix file name prefix of tmp
+ @param isDeleteOnExit if true, the tmp will be deleted when the VM exits
+ @return a newly created tmp file
+ @exception IOException If a tmp file cannot be created
+ @see java.io.File#createTempFile(String, String, File)
+ @see java.io.File#deleteOnExit()]]>
+ </doc>
+ </method>
+ <method name="replaceFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="target" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move the src file to the name specified by target.
+ @param src the source file
+ @param target the target file
+ @exception IOException If this operation fails]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of file-processing utility methods.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil -->
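+  <!-- Illustrative usage sketch for FileUtil (not part of the generated API
+       record); the paths below are assumptions for the example.
+
+         import java.io.File;
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.fs.FileSystem;
+         import org.apache.hadoop.fs.FileUtil;
+         import org.apache.hadoop.fs.Path;
+
+         Configuration conf = new Configuration();
+         FileSystem localFs = FileSystem.getLocal(conf);
+         FileSystem dstFs = FileSystem.get(conf);
+         // Copy a local file to the default FileSystem, keeping the source
+         // (deleteSource = false).
+         boolean copied = FileUtil.copy(localFs, new Path("/tmp/input.txt"),
+                                        dstFs, new Path("/user/example/input.txt"),
+                                        false, conf);
+         // Recursively delete a local scratch directory; a false return means
+         // the directory may be partially deleted.
+         boolean deleted = FileUtil.fullyDelete(new File("/tmp/scratch"));
+  -->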
+ <!-- start class org.apache.hadoop.fs.FileUtil.HardLink -->
+ <class name="FileUtil.HardLink" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil.HardLink"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createHardLink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.io.File"/>
+ <param name="linkName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a hard link.]]>
+ </doc>
+ </method>
+ <method name="getLinkCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Retrieves the number of links to the specified file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Class for creating hard links.
+ Supports Unix, Cygwin, and Windows XP.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil.HardLink -->
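+  <!-- Illustrative sketch for FileUtil.HardLink (not part of the generated API
+       record); file names are assumptions, and the calls only work where the
+       underlying platform supports hard links.
+
+         import java.io.File;
+         import org.apache.hadoop.fs.FileUtil;
+
+         File target = new File("/tmp/original.dat");
+         File link = new File("/tmp/original.link");
+         FileUtil.HardLink.createHardLink(target, link);
+         int count = FileUtil.HardLink.getLinkCount(target); // 2 after linking
+  -->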
+ <!-- start class org.apache.hadoop.fs.FilterFileSystem -->
+ <class name="FilterFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing hostnames, offset and size of
+ portions of the given file. For a nonexistent
+ file or regions, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The FileSystem will simply return an element containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List files in a directory.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param newDir the new working directory]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should optimally
+ be split into to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get file status.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A <code>FilterFileSystem</code> contains
+ some other file system, which it uses as
+ its basic file system, possibly transforming
+ the data along the way or providing additional
+ functionality. The class <code>FilterFileSystem</code>
+ itself simply overrides all methods of
+ <code>FileSystem</code> with versions that
+ pass all requests to the contained file
+ system. Subclasses of <code>FilterFileSystem</code>
+ may further override some of these methods
+ and may also provide additional methods
+ and fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FilterFileSystem -->
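+  <!-- A minimal FilterFileSystem subclass sketch (not part of the generated API
+       record). It logs each open() call and delegates everything else to the
+       contained file system; the class name is an assumption.
+
+         import java.io.IOException;
+         import org.apache.hadoop.fs.FSDataInputStream;
+         import org.apache.hadoop.fs.FileSystem;
+         import org.apache.hadoop.fs.FilterFileSystem;
+         import org.apache.hadoop.fs.Path;
+
+         public class LoggingFileSystem extends FilterFileSystem {
+           public LoggingFileSystem(FileSystem fs) {
+             super(fs);
+           }
+           public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+             System.out.println("open: " + f); // observe, then delegate
+             return super.open(f, bufferSize);
+           }
+         }
+  -->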
+ <!-- start class org.apache.hadoop.fs.FSDataInputStream -->
+ <class name="FSDataInputStream" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSDataInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="desired" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
+ and buffers input through a {@link BufferedInputStream}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataInputStream -->
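+  <!-- Illustrative sketch of FSDataInputStream's seek and positioned-read calls
+       (not part of the generated API record); the path is an assumption.
+
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.fs.FSDataInputStream;
+         import org.apache.hadoop.fs.FileSystem;
+         import org.apache.hadoop.fs.Path;
+
+         Configuration conf = new Configuration();
+         FileSystem fs = FileSystem.get(conf);
+         FSDataInputStream in = fs.open(new Path("/user/example/data.bin"));
+         byte[] header = new byte[16];
+         in.readFully(0L, header);  // positioned read: current offset is unchanged
+         in.seek(1024L);            // Seekable: move the stream offset
+         int b = in.read();
+         long pos = in.getPos();    // 1025 after reading one byte
+         in.close();
+  -->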
+ <!-- start class org.apache.hadoop.fs.FSDataOutputStream -->
+ <class name="FSDataOutputStream" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWrappedStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps an {@link OutputStream} in a {@link DataOutputStream},
+ buffers output through a {@link BufferedOutputStream} and creates a checksum
+ file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataOutputStream -->
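+  <!-- Illustrative sketch for FSDataOutputStream (not part of the generated API
+       record); the path is an assumption.
+
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.fs.FSDataOutputStream;
+         import org.apache.hadoop.fs.FileSystem;
+         import org.apache.hadoop.fs.Path;
+
+         Configuration conf = new Configuration();
+         FileSystem fs = FileSystem.get(conf);
+         FSDataOutputStream out = fs.create(new Path("/user/example/out.bin"));
+         out.writeInt(42);            // DataOutputStream methods are inherited
+         long written = out.getPos(); // bytes written so far (4 here)
+         out.close();
+  -->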
+ <!-- start class org.apache.hadoop.fs.FSError -->
+ <class name="FSError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Thrown for unexpected filesystem errors, presumed to reflect disk errors
+ in the native filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSError -->
+ <!-- start class org.apache.hadoop.fs.FSInputChecker -->
+ <class name="FSInputChecker" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when a ChecksumError occurs]]>
+ </doc>
+ </constructor>
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int, boolean, java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when a ChecksumError occurs
+ @param sum the type of Checksum engine
+ @param chunkSize maximum chunk size
+ @param checksumSize the number of bytes per checksum]]>
+ </doc>
+ </constructor>
+ <method name="readChunk" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads in the next checksum chunk data into <code>buf</code> at <code>offset</code>
+ and the checksum into <code>checksum</code>.
+ The method is used for implementing read; therefore, it should be optimized
+ for sequential reading.
+ @param pos chunkPos
+ @param buf destination buffer
+ @param offset offset in buf at which to store data
+ @param len maximum number of bytes to read
+ @return number of bytes read]]>
+ </doc>
+ </method>
+ <method name="getChunkPosition" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <doc>
+ <![CDATA[Return position of beginning of chunk containing pos.
+
+ @param pos a position in the file
+ @return the starting position of the chunk which contains the byte]]>
+ </doc>
+ </method>
+ <method name="needChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if there is a need for checksum verification]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read one checksum-verified byte
+
+ @return the next byte of data, or <code>-1</code> if the end of the
+ stream is reached.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read checksum-verified bytes from this byte-input stream into
+ the specified byte array, starting at the given offset.
+
+ <p> This method implements the general contract of the corresponding
+ <code>{@link InputStream#read(byte[], int, int) read}</code> method of
+ the <code>{@link InputStream}</code> class. As an additional
+ convenience, it attempts to read as many bytes as possible by repeatedly
+ invoking the <code>read</code> method of the underlying stream. This
+ iterated <code>read</code> continues until one of the following
+ conditions becomes true: <ul>
+
+ <li> The specified number of bytes have been read,
+
+ <li> The <code>read</code> method of the underlying stream returns
+ <code>-1</code>, indicating end-of-file.
+
+ </ul> If the first <code>read</code> on the underlying stream returns
+ <code>-1</code> to indicate end-of-file then this method returns
+ <code>-1</code>. Otherwise this method returns the number of bytes
+ actually read.
+
+ @param b destination buffer.
+ @param off offset at which to start storing bytes.
+ @param len maximum number of bytes to read.
+ @return the number of bytes read, or <code>-1</code> if the end of
+ the stream has been reached.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if any checksum error occurs]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over and discards <code>n</code> bytes of data from the
+ input stream.
+
+ <p>This method may skip more bytes than are remaining in the backing
+ file. This produces no exception and the number of bytes skipped
+ may include some number of bytes that were beyond the EOF of the
+ backing file. Attempting to read from the stream after skipping past
+ the end will result in -1 indicating the end of the file.
+
+<p>If <code>n</code> is negative, no bytes are skipped.
+
+ @param n the number of bytes to be skipped.
+ @return the actual number of bytes skipped.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to skip to is corrupted]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given position in the stream.
+ The next read() will be from that position.
+
+ <p>This method may seek past the end of the file.
+ This produces no exception and an attempt to read from
+ the stream will result in -1 indicating the end of the file.
+
+ @param pos the position to seek to.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to seek to is corrupted]]>
+ </doc>
+ </method>
+ <method name="readFully" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="stm" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A utility function that tries to read up to <code>len</code> bytes from
+ <code>stm</code>.
+
+ @param stm an input stream
+ @param buf destination buffer
+ @param offset offset at which to store data
+ @param len number of bytes to read
+ @return actual number of bytes read
+ @throws IOException if there is any IO error]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="maxChunkSize" type="int"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Set the checksum-related parameters.
+ @param sum which type of checksum to use
+ @param maxChunkSize maximum chunk size
+ @param checksumSize checksum size]]>
+ </doc>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="readlimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="file" type="org.apache.hadoop.fs.Path"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file name from which data is read]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This is a generic input stream for verifying checksums for
+ data before it is read by a user.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputChecker -->
+ <!-- start class org.apache.hadoop.fs.FSInputStream -->
+ <class name="FSInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSInputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="seek"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks a different copy of the data. Returns true if
+ a new source was found, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[FSInputStream is a generic old InputStream with a little bit
+ of RAF-style seek ability.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputStream -->
+ <!-- start class org.apache.hadoop.fs.FSOutputSummer -->
+ <class name="FSOutputSummer" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSOutputSummer" type="java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="writeChunk"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write one byte]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes <code>len</code> bytes from the specified byte array
+ starting at offset <code>off</code> and generates a checksum for
+ each data chunk.
+
+ <p> This method stores bytes from the given array into this
+ stream's buffer before it gets checksummed. The buffer gets checksummed
+ and flushed to the underlying output stream when all the data
+ in a checksum chunk is in the buffer. If the buffer is empty and the
+ requested length is at least as large as the size of the next checksum
+ chunk, this method will checksum and write the chunk directly
+ to the underlying output stream. Thus it avoids unnecessary data copies.
+
+ @param b the data.
+ @param off the start offset in the data.
+ @param len the number of bytes to write.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This is a generic output stream for generating checksums for
+ data before it is written to the underlying stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSOutputSummer -->
+ <!-- start class org.apache.hadoop.fs.FsShell -->
+ <class name="FsShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="FsShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current trash directory associated with this shell.]]>
+ </doc>
+ </method>
+ <method name="byteDesc" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="long"/>
+ <doc>
+ <![CDATA[Return an abbreviated English-language description of the byte length.]]>
+ </doc>
+ </method>
+ <method name="limitDecimalTo2" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dateForm" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="modifFmt" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provide command line access to a FileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsShell -->
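+  <!-- Illustrative sketch of driving FsShell programmatically (not part of the
+       generated API record), roughly equivalent to running `hadoop fs -ls /`.
+
+         import org.apache.hadoop.fs.FsShell;
+         import org.apache.hadoop.util.ToolRunner;
+
+         // FsShell implements Tool, so ToolRunner parses generic options
+         // (e.g. -conf) before handing the remaining arguments to run().
+         int exitCode = ToolRunner.run(new FsShell(), new String[] { "-ls", "/" });
+  -->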
+ <!-- start class org.apache.hadoop.fs.InMemoryFileSystem -->
+ <class name="InMemoryFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InMemoryFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InMemoryFileSystem" type="java.net.URI, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reserveSpaceWithCheckSum" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Register a file with its size. This will also register a checksum for the
+ file that the user is trying to create. This is required since none of
+ the FileSystem APIs accept the size of the file as an argument. But since
+ we must know the size of the file a priori before creating it,
+ the user must call this method for each file they want to create
+ and reserve memory for that file. We either succeed in reserving memory
+ for both the main file and the checksum file and return true, or return
+ false.]]>
+ </doc>
+ </method>
+ <method name="getFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getNumFiles" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getFSSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPercentUsed" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of the in-memory filesystem. This implementation assumes
+ that the file lengths are known ahead of time and that the total length of all
+ the files is below a certain limit (like 100 MB, configurable). Use the API
+ reserveSpaceWithCheckSum(Path f, int size) (see the description above) to
+ reserve space in the FS. The URI of this filesystem starts with
+ ramfs:// .]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.InMemoryFileSystem -->
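+  <!-- Illustrative sketch for InMemoryFileSystem (not part of the generated API
+       record); the URI, path, and size are assumptions for the example.
+
+         import java.net.URI;
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.fs.FSDataOutputStream;
+         import org.apache.hadoop.fs.InMemoryFileSystem;
+         import org.apache.hadoop.fs.Path;
+
+         Configuration conf = new Configuration();
+         InMemoryFileSystem ramFs =
+             new InMemoryFileSystem(URI.create("ramfs://mem"), conf);
+         Path p = new Path("ramfs://mem/part-00000");
+         // Reserve space for the file (and its checksum file) up front, since
+         // the FileSystem APIs do not take a size argument.
+         if (ramFs.reserveSpaceWithCheckSum(p, 1024L)) {
+           FSDataOutputStream out = ramFs.create(p);
+           // ... write at most 1024 bytes, then close ...
+           out.close();
+         }
+  -->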
+ <!-- start class org.apache.hadoop.fs.LocalDirAllocator -->
+ <class name="LocalDirAllocator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalDirAllocator" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an allocator object.
+ @param contextCfgItemName the configuration item (e.g. "mapred.local.dir") that names this context]]>
+ </doc>
+ </constructor>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. This method should be used if the size of
+ the file is not known a priori. We go round-robin over the set of disks
+ (via the configured dirs) and return the first complete path where
+ we could create the parent directory of the passed path.
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. Pass size as -1 if not known a priori. We
+ round-robin over the set of disks (via the configured dirs) and return
+ the first complete path which has enough space
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathToRead" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS for reading. We search through all the
+ configured dirs for the file's existence and return the complete
+ path to the file when we find one.
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createTmpFileForWrite" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a temporary file in the local FS. Pass size as -1 if not known
+ a priori. We round-robin over the set of disks (via the configured dirs)
+ and select the first complete path which has enough space. A file is
+ created on this directory. The file is guaranteed to go away when the
+ JVM exits.
+ @param pathStr prefix for the temporary file
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return a unique temporary file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isContextValid" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextCfgItemName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Method to check whether a context is valid.
+ @param contextCfgItemName the configuration item that names the context
+ @return true if the context is valid, false otherwise]]>
+ </doc>
+ </method>
+ <method name="ifExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[We search through all the configured dirs for the file's existence
+ and return true when we find it.
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return true if files exist. false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of a round-robin scheme for disk allocation for creating
+ files. It keeps track of which disk was last allocated
+ for a file write. For the current request, the next disk from
+ the set of disks is allocated if the free space on the disk is
+ sufficient to accommodate the file that is being considered for
+ creation. If the space requirements cannot be met, the next disk in order
+ is tried, and so on, until a disk is found with sufficient capacity.
+ Once a disk with sufficient space is identified, a check is done to make
+ sure that the disk is writable. Also, there is an API provided that doesn't
+ take the space requirements into consideration but just checks whether the
+ disk under consideration is writable (this should be used for cases where
+ the file size is not known a priori). An API is provided to read a path that
+ was created earlier. That API works by scanning all the disks for the
+ input pathname.
+ This implementation also provides the functionality of having multiple
+ allocators per JVM (one for each unique functionality or context, like
+ mapred, dfs-client, etc.). It ensures that there is only one instance of
+ an allocator per context per JVM.
+ Note:
+ 1. The contexts referred to above are actually the configuration items defined
+ in the Configuration class, like "mapred.local.dir" (for which we want to
+ control the dir allocations). The context strings are exactly those
+ configuration items.
+ 2. This implementation does not take into consideration cases where
+ a disk becomes read-only or runs out of space while a file is being written
+ to (disks are shared between multiple processes, and so the latter situation
+ is probable).
+ 3. In the class implementation, "Disk" is referred to as "Dir", which
+ actually points to the configured directory on the Disk which will be the
+ parent for all file write/read allocations.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalDirAllocator -->
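+  <!-- Illustrative sketch for LocalDirAllocator (not part of the generated API
+       record). "mapred.local.dir" is a real context string per the class doc;
+       the relative path and size below are assumptions.
+
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.fs.LocalDirAllocator;
+         import org.apache.hadoop.fs.Path;
+
+         Configuration conf = new Configuration();
+         LocalDirAllocator allocator = new LocalDirAllocator("mapred.local.dir");
+         // Round-robin over the configured dirs; pick one with 4096 bytes free.
+         Path scratch =
+             allocator.getLocalPathForWrite("spill/output0.out", 4096L, conf);
+  -->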
+ <!-- start class org.apache.hadoop.fs.LocalFileSystem -->
+ <class name="LocalFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocalFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Moves files to a bad file directory on the same device, so that their
+ storage will not be reused.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the checksummed local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalFileSystem -->
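+  <!-- Illustrative sketch for LocalFileSystem (not part of the generated API
+       record); the path is an assumption.
+
+         import java.io.File;
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.fs.FileSystem;
+         import org.apache.hadoop.fs.LocalFileSystem;
+         import org.apache.hadoop.fs.Path;
+
+         Configuration conf = new Configuration();
+         LocalFileSystem localFs = FileSystem.getLocal(conf);
+         File f = localFs.pathToFile(new Path("/tmp/example.txt")); // Path to File
+  -->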
+ <!-- start class org.apache.hadoop.fs.Path -->
+ <class name="Path" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="Path" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a path from a String. Path strings are URIs, but with
+ unescaped elements and some additional normalization.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Path from components.]]>
+ </doc>
+ </constructor>
+ <method name="toUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this to a URI.]]>
+ </doc>
+ </method>
+ <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the FileSystem that owns this Path.]]>
+ </doc>
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True if this path is absolute, i.e. its path component begins with a slash.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the final component of this path.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the parent of a path or null if at root.]]>
+ </doc>
+ </method>
+ <method name="suffix" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a suffix to the final name in the path.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="depth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of elements in this path.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <doc>
+ <![CDATA[Returns a qualified path object.]]>
+ </doc>
+ </method>
+ <field name="SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The directory separator, a slash.]]>
+ </doc>
+ </field>
+ <field name="SEPARATOR_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CUR_DIR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Names a file or directory in a {@link FileSystem}.
+ Path strings use slash as the directory separator. A path string is
+ absolute if it begins with a slash.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Path -->
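+ <!-- Illustrative sketch: the parent/child resolution and inspection methods
+      documented above; the commented results follow from the descriptions given.
+
+        import org.apache.hadoop.fs.Path;
+
+        Path parent = new Path("/user/alice");          // absolute: starts with a slash
+        Path child  = new Path(parent, "data/part-0");  // resolve child against parent
+        child.isAbsolute();    // true
+        child.getName();       // "part-0" (final component)
+        child.getParent();     // /user/alice/data
+        child.depth();         // 4 (number of path elements)
+        child.suffix(".tmp");  // /user/alice/data/part-0.tmp
+ -->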
+ <!-- start interface org.apache.hadoop.fs.PathFilter -->
+ <interface name="PathFilter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Tests whether or not the specified abstract pathname should be
+ included in a pathname list.
+
+ @param path The abstract pathname to be tested
+ @return <code>true</code> if and only if <code>path</code>
+ should be included]]>
+ </doc>
+ </method>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PathFilter -->
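+ <!-- Illustrative sketch: a PathFilter accepting only ".txt" paths. The
+      filtering FileSystem.listStatus overload and the FileSystem.get factory
+      are assumed from the FileSystem API of this release; IOException handling omitted.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.FileStatus;
+        import org.apache.hadoop.fs.FileSystem;
+        import org.apache.hadoop.fs.Path;
+        import org.apache.hadoop.fs.PathFilter;
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        PathFilter txtOnly = new PathFilter() {
+          public boolean accept(Path path) {
+            return path.getName().endsWith(".txt");
+          }
+        };
+        FileStatus[] matches = fs.listStatus(new Path("/logs"), txtOnly);
+ -->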
+ <!-- start interface org.apache.hadoop.fs.PositionedReadable -->
+ <interface name="PositionedReadable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read up to the specified number of bytes, from a given
+ position within a file, and return the number of bytes read. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the specified number of bytes, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a number of bytes equal to the length of the buffer, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits positional reading.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PositionedReadable -->
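+ <!-- Illustrative sketch: positional reads through FSDataInputStream, which is
+      assumed (as in the Hadoop sources) to implement PositionedReadable. The
+      stream's own offset is not moved, so concurrent readers are safe;
+      IOException handling omitted.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.FSDataInputStream;
+        import org.apache.hadoop.fs.FileSystem;
+        import org.apache.hadoop.fs.Path;
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        FSDataInputStream in = fs.open(new Path("/data/blob"));
+        byte[] header = new byte[16];
+        in.readFully(0L, header);              // exactly header.length bytes at offset 0
+        int n = in.read(1024L, header, 0, 8);  // up to 8 bytes at offset 1024
+        in.close();
+ -->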
+ <!-- start class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <class name="RawLocalFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RawLocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the specified directory hierarchy. Does not
+ treat existence as an error.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsWorkingFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chown to set owner.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chmod to set permission.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the raw local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.RawLocalFileSystem -->
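+ <!-- Illustrative sketch: reaching the raw (non-checksummed) local filesystem
+      underneath the checksummed one. FileSystem.getLocal and
+      ChecksumFileSystem.getRawFileSystem are assumed from the Hadoop 0.20.0
+      sources; IOException handling omitted.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.FileSystem;
+        import org.apache.hadoop.fs.LocalFileSystem;
+        import org.apache.hadoop.fs.Path;
+
+        LocalFileSystem checksummed = FileSystem.getLocal(new Configuration());
+        FileSystem raw = checksummed.getRawFileSystem();  // a RawLocalFileSystem
+        raw.mkdirs(new Path("/tmp/scratch"));             // existence is not an error
+ -->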
+ <!-- start interface org.apache.hadoop.fs.Seekable -->
+ <interface name="Seekable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks to a different copy of the data. Returns true if
+ a new source is found, false otherwise.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits seeking.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Seekable -->
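+ <!-- Illustrative sketch: seeking within an FSDataInputStream, which is assumed
+      (as in the Hadoop sources) to implement Seekable; IOException handling omitted.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.FSDataInputStream;
+        import org.apache.hadoop.fs.FileSystem;
+        import org.apache.hadoop.fs.Path;
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        FSDataInputStream in = fs.open(new Path("/data/blob"));
+        in.seek(4096L);          // the next read() starts at byte 4096
+        long pos = in.getPos();  // 4096
+        in.close();
+ -->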
+ <!-- start class org.apache.hadoop.fs.ShellCommand -->
+ <class name="ShellCommand" extends="org.apache.hadoop.util.Shell"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link Shell} instead.">
+ <constructor name="ShellCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A base class for running a Unix command like du or df.
+ @deprecated Use {@link Shell} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ShellCommand -->
+ <!-- start class org.apache.hadoop.fs.Trash -->
+ <class name="Trash" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Trash" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a trash can accessor.
+ @param conf a Configuration]]>
+ </doc>
+ </constructor>
+ <method name="moveToTrash" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move a file or directory to the current trash directory.
+ @return false if the item is already in the trash or trash is disabled]]>
+ </doc>
+ </method>
+ <method name="checkpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a trash checkpoint.]]>
+ </doc>
+ </method>
+ <method name="expunge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete old checkpoints.]]>
+ </doc>
+ </method>
+ <method name="getEmptier" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a {@link Runnable} that periodically empties the trash of all
+ users, intended to be run by the superuser. Only one checkpoint is kept
+ at a time.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Run an emptier.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides a <i>trash</i> feature. Files are moved to a user's trash
+ directory, a subdirectory of their home directory named ".Trash". Files are
+ initially moved to a <i>current</i> sub-directory of the trash directory.
+ Within that sub-directory their original path is preserved. Periodically
+ one may checkpoint the current trash and remove older checkpoints. (This
+ design permits trash management without enumeration of the full trash
+ content, without date support in the filesystem, and without clock
+ synchronization.)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Trash -->
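+ <!-- Illustrative sketch: the trash lifecycle described above, moving an item
+      to trash, checkpointing the current directory, and expunging old
+      checkpoints; IOException handling omitted.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.Path;
+        import org.apache.hadoop.fs.Trash;
+
+        Trash trash = new Trash(new Configuration());
+        if (!trash.moveToTrash(new Path("/user/alice/old"))) {
+          // item was already in the trash, or trash is disabled
+        }
+        trash.checkpoint();  // freeze the "current" trash directory as a checkpoint
+        trash.expunge();     // delete checkpoints older than the configured interval
+ -->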
+ <doc>
+ <![CDATA[An abstract file system API.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.fs.kfs">
+ <!-- start class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+ <class name="KosmosFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="KosmosFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getContentLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return null if the file doesn't exist; otherwise, get the
+ locations of the various chunks of the file from KFS.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A FileSystem backed by KFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+ <doc>
+ <![CDATA[<h1>A client for the Kosmos filesystem (KFS)</h1>
+
+<h3>Introduction</h3>
+
+This page describes how to use the Kosmos Filesystem
+(<a href="http://kosmosfs.sourceforge.net"> KFS </a>) as a backing
+store with Hadoop. This page assumes that you have downloaded the
+KFS software and installed the necessary binaries as outlined in the KFS
+documentation.
+
+<h3>Steps</h3>
+
+ <ul>
+ <li>In the Hadoop conf directory edit hadoop-default.xml,
+ and add the following:
+ <pre>
+&lt;property&gt;
+ &lt;name&gt;fs.kfs.impl&lt;/name&gt;
+ &lt;value&gt;org.apache.hadoop.fs.kfs.KosmosFileSystem&lt;/value&gt;
+ &lt;description&gt;The FileSystem for kfs: uris.&lt;/description&gt;
+&lt;/property&gt;
+ </pre>
+
+ <li>In the Hadoop conf directory edit hadoop-site.xml,
+ adding the following (with appropriate values for
+ &lt;server&gt; and &lt;port&gt;):
+ <pre>
+&lt;property&gt;
+ &lt;name&gt;fs.default.name&lt;/name&gt;
+ &lt;value&gt;kfs://&lt;server:port&gt;&lt;/value&gt;
+&lt;/property&gt;
+
+&lt;property&gt;
+ &lt;name&gt;fs.kfs.metaServerHost&lt;/name&gt;
+ &lt;value&gt;&lt;server&gt;&lt;/value&gt;
+ &lt;description&gt;The location of the KFS meta server.&lt;/description&gt;
+&lt;/property&gt;
+
+&lt;property&gt;
+ &lt;name&gt;fs.kfs.metaServerPort&lt;/name&gt;
+ &lt;value&gt;&lt;port&gt;&lt;/value&gt;
+ &lt;description&gt;The port on which the KFS meta server listens.&lt;/description&gt;
+&lt;/property&gt;
+
+</pre>
+ </li>
+
+ <li>Copy KFS's <i> kfs-0.1.jar </i> to Hadoop's lib directory. This step
+ enables Hadoop to load the KFS-specific modules. Note
+ that kfs-0.1.jar was built when you compiled the KFS source
+ code. This jar file contains code that calls KFS's client
+ library code via JNI; the native code is in KFS's <i>
+ libkfsClient.so </i> library.
+ </li>
+
+ <li> When the Hadoop map/reduce trackers start up, those
+processes (on local as well as remote nodes) will now need to load
+KFS's <i> libkfsClient.so </i> library. To simplify this process, it is advisable to
+store libkfsClient.so in an NFS-accessible directory (similar to where
+Hadoop binaries/scripts are stored); then modify Hadoop's
+conf/hadoop-env.sh, adding the following line and providing a suitable
+value for &lt;path&gt;:
+<pre>
+export LD_LIBRARY_PATH=&lt;path&gt;
+</pre>
+
+
+ <li>Start only the map/reduce trackers
+ <br />
+ for example, execute Hadoop's bin/start-mapred.sh</li>
+ </ul>
+<br/>
+
+Once the map/reduce trackers start up, all file I/O is done to KFS.]]>
+ </doc>
+</package>
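+<!-- Illustrative sketch: once the kfs: scheme is configured as above, KFS is
+     reached through the ordinary FileSystem API. The server and port are
+     placeholders, and FileSystem.exists is assumed from the FileSystem API of
+     this release; IOException handling omitted.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       Path p = new Path("kfs://server:20000/data/input");
+       FileSystem kfs = p.getFileSystem(new Configuration());  // a KosmosFileSystem
+       boolean present = kfs.exists(p);
+-->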
+<package name="org.apache.hadoop.fs.permission">
+ <!-- start class org.apache.hadoop.fs.permission.AccessControlException -->
+ <class name="AccessControlException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AccessControlException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor is needed for unwrapping from
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[An exception class for access control related issues.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.AccessControlException -->
+ <!-- start class org.apache.hadoop.fs.permission.FsAction -->
+ <class name="FsAction" extends="java.lang.Enum&lt;org.apache.hadoop.fs.permission.FsAction&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.fs.permission.FsAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="implies" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[Return true if this action implies that action.
+ @param that the action to check against]]>
+ </doc>
+ </method>
+ <method name="and" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[AND operation.]]>
+ </doc>
+ </method>
+ <method name="or" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[OR operation.]]>
+ </doc>
+ </method>
+ <method name="not" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[NOT operation.]]>
+ </doc>
+ </method>
+ <field name="INDEX" type="int"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Octal representation]]>
+ </doc>
+ </field>
+ <field name="SYMBOL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Symbolic representation]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[File system actions, e.g. read, write, etc.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsAction -->
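+ <!-- Illustrative sketch: combining and testing actions with the operations
+      above. The enum constants (READ, WRITE, READ_WRITE, EXECUTE) are assumed
+      from the Hadoop 0.20.0 sources.
+
+        import org.apache.hadoop.fs.permission.FsAction;
+
+        FsAction rw = FsAction.READ.or(FsAction.WRITE);  // READ_WRITE
+        rw.implies(FsAction.READ);                       // true
+        rw.and(FsAction.READ);                           // READ
+        rw.not();                                        // EXECUTE
+ -->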
+ <!-- start class org.apache.hadoop.fs.permission.FsPermission -->
+ <class name="FsPermission" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from the given {@link FsAction} values.
+ @param u user action
+ @param g group action
+ @param o other action]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="short"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from the given mode.
+ @param mode the permission mode encoded as a short
+ @see #toShort()]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor
+
+ @param other other permission]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="permission" type="short"/>
+ <doc>
+ <![CDATA[Create an immutable {@link FsPermission} object.]]>
+ </doc>
+ </method>
+ <method name="getUserAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getGroupAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getOtherAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return other {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link FsPermission} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="toShort" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Encode the object to a short.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply a umask to this permission and return a new one]]>
+ </doc>
+ </method>
+ <method name="getUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="setUMask"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Set the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="getDefault" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default permission.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unixSymbolicPermission" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create a FsPermission from a Unix symbolic permission string
+ @param unixSymbolicPermission e.g. "-rw-rw-rw-"]]>
+ </doc>
+ </method>
+ <field name="UMASK_LABEL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[umask property label]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_UMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A class for file/directory permissions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsPermission -->
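+ <!-- Illustrative sketch: building permissions from the constructors above and
+      applying a umask; the octal values in the comments follow the usual
+      rwx bit encoding.
+
+        import org.apache.hadoop.fs.permission.FsAction;
+        import org.apache.hadoop.fs.permission.FsPermission;
+
+        FsPermission p = new FsPermission(FsAction.ALL,           // user:  rwx
+                                          FsAction.READ_EXECUTE,  // group: r-x
+                                          FsAction.READ_EXECUTE); // other: r-x
+        short mode = p.toShort();                     // 0755
+        FsPermission umask = new FsPermission((short) 022);
+        FsPermission masked = p.applyUMask(umask);    // clears the bits set in the umask
+ -->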
+ <!-- start class org.apache.hadoop.fs.permission.PermissionStatus -->
+ <class name="PermissionStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="PermissionStatus" type="java.lang.String, java.lang.String, org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <param name="group" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Create an immutable {@link PermissionStatus} object.]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user name]]>
+ </doc>
+ </method>
+ <method name="getGroupName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group name]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return permission]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply umask.
+ @see FsPermission#applyUMask(FsPermission)]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link PermissionStatus} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store permission related information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.PermissionStatus -->
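+ <!-- Illustrative sketch: bundling owner, group, and mode with the
+      createImmutable factory above.
+
+        import org.apache.hadoop.fs.permission.FsPermission;
+        import org.apache.hadoop.fs.permission.PermissionStatus;
+
+        PermissionStatus status = PermissionStatus.createImmutable(
+            "alice", "staff", new FsPermission((short) 0644));
+        status.getUserName();    // "alice"
+        status.getGroupName();   // "staff"
+        status.getPermission();  // the 0644 permission
+ -->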
+</package>
+<package name="org.apache.hadoop.fs.s3">
+ <!-- start class org.apache.hadoop.fs.s3.Block -->
+ <class name="Block" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Block" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Holds metadata about a block of data being stored in a {@link FileSystemStore}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.Block -->
+ <!-- start interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <interface name="FileSystemStore" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inode" type="org.apache.hadoop.fs.s3.INode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="inodeExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveINode" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveBlock" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="byteRangeStart" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listDeepSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="purge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete everything. Used for testing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="dump"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Diagnostic method to dump all INodes to the console.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for storing and retrieving {@link INode}s and {@link Block}s.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <!-- start class org.apache.hadoop.fs.s3.INode -->
+ <class name="INode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="INode" type="org.apache.hadoop.fs.s3.INode.FileType, org.apache.hadoop.fs.s3.Block[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.fs.s3.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileType" return="org.apache.hadoop.fs.s3.INode.FileType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSerializedLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="serialize" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deserialize" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="FILE_TYPES" type="org.apache.hadoop.fs.s3.INode.FileType[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DIRECTORY_INODE" type="org.apache.hadoop.fs.s3.INode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[Holds file metadata including type (regular file or directory)
+      and the list of blocks that are pointers to the data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.INode -->
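+  <!-- Example: a sketch of a round trip through the serialized form (the
+       FileType constant and the "blocks" array are assumptions):
+
+       INode inode = new INode(INode.FileType.FILE, blocks);
+       InputStream in = inode.serialize();
+       INode copy = INode.deserialize(in);
+       // copy.isFile() holds and getSerializedLength() values match
+  -->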
+ <!-- start class org.apache.hadoop.fs.s3.MigrationTool -->
+ <class name="MigrationTool" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="MigrationTool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ This class is a tool for migrating data from an older to a newer version
+ of an S3 filesystem.
+ </p>
+ <p>
+  All files in the filesystem are migrated by rewriting the block metadata;
+  no data files are touched.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.MigrationTool -->
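+  <!-- Example: since MigrationTool implements Tool, it can be driven through
+       ToolRunner; a sketch (the S3 URI argument is a placeholder):
+
+       int exitCode = ToolRunner.run(new MigrationTool(),
+                                     new String[] { "s3://bucket" });
+  -->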
+ <!-- start class org.apache.hadoop.fs.s3.S3Exception -->
+ <class name="S3Exception" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Exception" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown if there is a problem communicating with Amazon S3.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Exception -->
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystem -->
+ <class name="S3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="S3FileSystem" type="org.apache.hadoop.fs.s3.FileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[FileStatus for S3 file systems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} backed by <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystem -->
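+  <!-- Example: a minimal sketch of reading through S3FileSystem via the
+       generic FileSystem factory (bucket and path are placeholders):
+
+       Configuration conf = new Configuration();
+       FileSystem fs = FileSystem.get(URI.create("s3://bucket"), conf);
+       FSDataInputStream in = fs.open(new Path("/dir1/file1"), 4096);
+  -->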
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <class name="S3FileSystemException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystemException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when there is a fatal exception while using {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <!-- start class org.apache.hadoop.fs.s3.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="org.apache.hadoop.fs.s3.S3FileSystemException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when Hadoop cannot read the version of the data stored
+ in {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.VersionMismatchException -->
+ <doc>
+ <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem} that uses <a href="http://aws.amazon.com/s3">Amazon S3</a>.</p>
+
+<p>
+Files are stored in S3 as blocks (represented by
+{@link org.apache.hadoop.fs.s3.Block}), which have an ID and a length.
+Block metadata is stored in S3 as a small record (represented by
+{@link org.apache.hadoop.fs.s3.INode}) using the URL-encoded
+path string as a key. Inodes record the file type (regular file or directory) and the list of blocks.
+This design makes it easy to seek to any given position in a file by reading the inode data to compute
+which block to access, then using S3's support for
+<a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.2">HTTP Range</a> headers
+to start streaming from the correct position.
+Renames are also efficient since only the inode is moved (by a DELETE followed by a PUT since
+S3 does not support renames).
+</p>
+<p>
+For a single file <i>/dir1/file1</i> which takes two blocks of storage, the file structure in S3
+would be something like this:
+</p>
+<pre>
+/
+/dir1
+/dir1/file1
+block-6415776850131549260
+block-3026438247347758425
+</pre>
+<p>
+Inodes start with a leading <code>/</code>, while blocks are prefixed with <code>block-</code>.
+</p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.fs.shell">
+ <!-- start class org.apache.hadoop.fs.shell.Count -->
+ <class name="Count" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Count"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="matches" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String"/>
+ </method>
+ <method name="count"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="java.io.PrintStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USAGE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DESCRIPTION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Count the number of directories, files and bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Count -->
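+  <!-- Example: a sketch of the static helper (the path is a placeholder;
+       count() throws IOException):
+
+       Count.count("/user/data", new Configuration(), System.out);
+  -->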
+</package>
+<package name="org.apache.hadoop.io">
+ <!-- start class org.apache.hadoop.io.AbstractMapWritable -->
+ <class name="AbstractMapWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="AbstractMapWritable"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructor.]]>
+ </doc>
+ </constructor>
+ <method name="addToMap"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a Class to the maps if it is not already present.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="byte"/>
+ <doc>
+      <![CDATA[@return the Class object for the specified id]]>
+ </doc>
+ </method>
+ <method name="getId" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[@return the id for the specified Class]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Used by child copy constructors.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the conf]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@param conf the conf to set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract base class for MapWritable and SortedMapWritable
+
+ Unlike org.apache.nutch.crawl.MapWritable, this class allows creation of
+ MapWritable&lt;Writable, MapWritable&gt; so the CLASS_TO_ID and ID_TO_CLASS
+  maps travel with each instance instead of being static.
+
+ Class ids range from 1 to 127 so there can be at most 127 distinct classes
+ in any specific map instance.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.AbstractMapWritable -->
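+  <!-- Example: a sketch using MapWritable, a concrete subclass; the
+       class-to-id table travels with each instance, so values of different
+       Writable types can share one map (at most 127 distinct classes):
+
+       MapWritable map = new MapWritable();
+       map.put(new Text("count"), new IntWritable(42));
+       map.put(new Text("ratio"), new FloatWritable(0.5f));
+  -->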
+ <!-- start class org.apache.hadoop.io.ArrayFile -->
+ <class name="ArrayFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A dense file-based mapping from integers to values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Reader -->
+ <class name="ArrayFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an array reader for the named file.]]>
+ </doc>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader before its <code>n</code>th value.]]>
+ </doc>
+ </method>
+ <method name="next" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read and return the next value in the file.]]>
+ </doc>
+ </method>
+ <method name="key" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the key associated with the most recent call to {@link
+ #seek(long)}, {@link #next(Writable)}, or {@link
+ #get(long,Writable)}.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the <code>n</code>th value in the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Reader -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Writer -->
+ <class name="ArrayFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a value to the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Writer -->
+ <!-- start class org.apache.hadoop.io.ArrayWritable -->
+ <class name="ArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for arrays containing instances of a class. The elements of this
+ writable must all be instances of the same class. If this writable will be
+ the input for a Reducer, you will need to create a subclass that sets the
+ value to be of the proper type.
+
+ For example:
+ <code>
+ public class IntArrayWritable extends ArrayWritable {
+ public IntArrayWritable() {
+ super(IntWritable.class);
+ }
+ }
+ </code>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayWritable -->
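+  <!-- Example: a sketch using the IntArrayWritable subclass from the
+       javadoc above:
+
+       IntArrayWritable arr = new IntArrayWritable();
+       arr.set(new Writable[] { new IntWritable(1), new IntWritable(2) });
+       Writable[] values = arr.get();
+  -->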
+ <!-- start class org.apache.hadoop.io.BooleanWritable -->
+ <class name="BooleanWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BooleanWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BooleanWritable" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="get" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for booleans.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable -->
+ <!-- start class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <class name="BooleanWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BooleanWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BooleanWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable.Comparator -->
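+  <!-- Example: raw comparators like this compare records in serialized byte
+       form, avoiding deserialization during sorting; a sketch of the usual
+       registration pattern (a static initializer in the Writable class):
+
+       static {
+         WritableComparator.define(BooleanWritable.class,
+                                   new BooleanWritable.Comparator());
+       }
+  -->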
+ <!-- start class org.apache.hadoop.io.BytesWritable -->
+ <class name="BytesWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-size sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="BytesWritable" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a BytesWritable using the byte array as the initial value.
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the BytesWritable.
+ @return The data is only valid between 0 and getSize() - 1.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current size of the buffer.]]>
+ </doc>
+ </method>
+ <method name="setSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Change the size of the buffer. The values in the old range are preserved
+      and any new values are undefined. The capacity is changed if necessary.
+ @param size The new number of bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the capacity, which is the maximum size that could be handled without
+ resizing the backing storage.
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_cap" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved.
+ @param new_cap The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="org.apache.hadoop.io.BytesWritable"/>
+ <doc>
+ <![CDATA[Set the BytesWritable to the contents of the given newData.
+ @param newData the value to set this BytesWritable to.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Set the value to a copy of the given byte range
+ @param newData the new values to copy in
+ @param offset the offset in newData to start at
+ @param length the number of bytes to copy]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Define the sort order of the BytesWritable.
+ @param right_obj The other bytes writable
+ @return Positive if left is bigger than right, 0 if they are equal, and
+ negative if left is smaller than right.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Are the two byte sequences equal?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generate the stream of bytes as hex pairs separated by ' '.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is usable as a key or value.
+  It is resizable and distinguishes between the size of the sequence and
+  the current capacity. The hash function is the front of the MD5 of the
+  buffer. The sort order is the same as memcmp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable -->
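+  <!-- Example: a sketch of the size/capacity distinction:
+
+       BytesWritable bw = new BytesWritable(new byte[] { 1, 2, 3 });
+       bw.setCapacity(16); // backing storage grows; data is preserved
+       bw.setSize(2);      // only bytes 0 to getSize() - 1 remain valid
+       byte[] raw = bw.get();
+  -->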
+ <!-- start class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <class name="BytesWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BytesWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BytesWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <!-- start interface org.apache.hadoop.io.Closeable -->
+ <interface name="Closeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after the last call to any other method on this object to free
+ and/or flush resources. Typical implementations do nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[That which can be closed.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Closeable -->
+ <!-- start class org.apache.hadoop.io.CompressedWritable -->
+ <class name="CompressedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="CompressedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ensureInflated"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Must be called by all methods which access fields to ensure that the data
+ has been uncompressed.]]>
+ </doc>
+ </method>
+ <method name="readFieldsCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #readFields(DataInput)}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #write(DataOutput)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base-class for Writables which store themselves compressed and lazily
+ inflate on field access. This is useful for large objects whose fields are
+  not altered during a map or reduce operation: leaving the field data
+ compressed makes copying the instance from one file to another much
+ faster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.CompressedWritable -->
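+  <!-- Example: a minimal subclass sketch (names are illustrative; imports
+       omitted): implement the compressed read/write hooks and call
+       ensureInflated() before any field access:
+
+       public class BigRecord extends CompressedWritable {
+         private String payload = "";
+         protected void readFieldsCompressed(DataInput in) throws IOException {
+           payload = in.readUTF();
+         }
+         protected void writeCompressed(DataOutput out) throws IOException {
+           out.writeUTF(payload);
+         }
+         public String getPayload() {
+           ensureInflated(); // lazily inflate on first field access
+           return payload;
+         }
+       }
+  -->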
+ <!-- start class org.apache.hadoop.io.DataInputBuffer -->
+ <class name="DataInputBuffer" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataInputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataInput} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataInputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataInputBuffer buffer = new DataInputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using DataInput methods ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataInputBuffer -->
+ <!-- start class org.apache.hadoop.io.DataOutputBuffer -->
+ <class name="DataOutputBuffer" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataOutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from a DataInput directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataOutput} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataOutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataOutputBuffer buffer = new DataOutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using DataOutput methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataOutputBuffer -->
+ <!-- start class org.apache.hadoop.io.DefaultStringifier -->
+ <class name="DefaultStringifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Stringifier&lt;T&gt;"/>
+ <constructor name="DefaultStringifier" type="org.apache.hadoop.conf.Configuration, java.lang.Class&lt;T&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="store"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="item" type="K"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the item in the configuration with the given keyName.
+
+ @param <K> the class of the item
+      @param conf the configuration to store the item in
+      @param item the object to be stored
+      @param keyName the name of the key to use
+      @throws IOException forwarded from the underlying
+      {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="load" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+      @return the restored object
+      @throws IOException forwarded from the underlying
+      {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="storeArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="items" type="K[]"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the array of items in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param items the objects to be stored
+ @param keyName the name of the key to use
+ @throws IndexOutOfBoundsException if the items array is empty
+      @throws IOException forwarded from the underlying
+      {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="loadArray" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the array of objects from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+      @return the restored object
+      @throws IOException forwarded from the underlying
+      {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[DefaultStringifier is the default implementation of the {@link Stringifier}
+  interface, which stringifies objects using base64 encoding of their
+  serialized form. The {@link Serializer} and
+ {@link Deserializer} are obtained from the {@link SerializationFactory}.
+ <br>
+ DefaultStringifier offers convenience methods to store/load objects to/from
+ the configuration.
+
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DefaultStringifier -->
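+  <!-- Example: a sketch of the store/load convenience methods (the key name
+       is a placeholder):
+
+       Configuration conf = new Configuration();
+       DefaultStringifier.store(conf, new IntWritable(7), "my.key");
+       IntWritable restored =
+           DefaultStringifier.load(conf, "my.key", IntWritable.class);
+  -->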
+ <!-- start class org.apache.hadoop.io.FloatWritable -->
+ <class name="FloatWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="FloatWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FloatWritable" type="float"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="float"/>
+ <doc>
+ <![CDATA[Set the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a FloatWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two FloatWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for floats.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable -->
+ <!-- start class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <class name="FloatWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FloatWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for FloatWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.GenericWritable -->
+ <class name="GenericWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="GenericWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Set the instance that is wrapped.
+
+      @param obj the instance to wrap]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the wrapped instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTypes" return="java.lang.Class[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return all classes that may be wrapped. Subclasses should implement this
+ to return a constant array of classes.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper for Writable instances.
+ <p>
+  When two sequence files, which have the same Key type but different Value
+  types, are mapped out to reduce, multiple Value types are not allowed.
+ In this case, this class can help you wrap instances with different types.
+ </p>
+
+ <p>
+  Compared with <code>ObjectWritable</code>, this class is much more efficient,
+  because <code>ObjectWritable</code> appends the class declaration as a String
+  to the output file in every Key-Value pair.
+ </p>
+
+ <p>
+ Generic Writable implements {@link Configurable} interface, so that it will be
+ configured by the framework. The configuration is passed to the wrapped objects
+ implementing {@link Configurable} interface <i>before deserialization</i>.
+ </p>
+
+  How to use it: <br>
+  1. Write your own class, such as GenericObject, which extends GenericWritable.<br>
+  2. Implement the abstract method <code>getTypes()</code> to define
+  the classes that will be wrapped in GenericObject in the application.
+  Note: the classes defined in the <code>getTypes()</code> method must
+  implement the <code>Writable</code> interface.
+ <br><br>
+
+ The code looks like this:
+ <blockquote><pre>
+ public class GenericObject extends GenericWritable {
+
+ private static Class[] CLASSES = {
+ ClassType1.class,
+ ClassType2.class,
+ ClassType3.class,
+ };
+
+ protected Class[] getTypes() {
+ return CLASSES;
+ }
+
+ }
+ </pre></blockquote>
+
+ @since Nov 8, 2006]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.GenericWritable -->
+ <!-- start class org.apache.hadoop.io.InputBuffer -->
+ <class name="InputBuffer" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link InputStream} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new InputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ InputBuffer buffer = new InputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using InputStream methods ...
+ }
+ </pre>
+ @see DataInputBuffer
+ @see DataOutput]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.InputBuffer -->
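+ <!-- A concrete variant of the usage pattern above (illustrative only):
+      the same InputBuffer instance is reset over two byte arrays instead
+      of allocating a new stream per record.
+
+      import org.apache.hadoop.io.InputBuffer;
+
+      InputBuffer buffer = new InputBuffer();
+      byte[][] records = { "one".getBytes(), "two".getBytes() };
+      for (byte[] data : records) {
+        buffer.reset(data, data.length);     // reuse, no new allocation
+        int b;
+        while ((b = buffer.read()) != -1) {  // plain InputStream reads
+          // consume b
+        }
+      }
+ -->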
+ <!-- start class org.apache.hadoop.io.IntWritable -->
+ <class name="IntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="IntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="IntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an IntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two IntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for ints.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable -->
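+ <!-- A minimal serialization round trip for IntWritable (illustrative only;
+      try/catch for IOException is omitted).
+
+      import java.io.*;
+      import org.apache.hadoop.io.IntWritable;
+
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      new IntWritable(42).write(new DataOutputStream(bytes));   // serialize
+
+      IntWritable copy = new IntWritable();
+      copy.readFields(new DataInputStream(
+          new ByteArrayInputStream(bytes.toByteArray())));      // deserialize
+      // copy.get() == 42
+ -->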
+ <!-- start class org.apache.hadoop.io.IntWritable.Comparator -->
+ <class name="IntWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IntWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for IntWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.IOUtils -->
+ <class name="IOUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="buffSize" type="int"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param buffSize the size of the buffer
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another. <strong>closes the input and output streams
+ at the end</strong>.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads len bytes in a loop.
+ @param in The InputStream to read from
+ @param buf The buffer to fill
+ @param off the offset into the buffer at which to start filling
+ @param len the number of bytes to read
+ @throws IOException if it could not read the requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Similar to readFully(). Skips bytes in a loop.
+ @param in The InputStream to skip bytes from
+ @param len number of bytes to skip.
+ @throws IOException if it could not skip the requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="closeStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Closeable"/>
+ <doc>
+ <![CDATA[Closes the stream, ignoring any {@link IOException}.
+ @param stream the stream to close]]>
+ </doc>
+ </method>
+ <method name="closeSocket"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <doc>
+ <![CDATA[Closes the socket, ignoring any {@link IOException}.
+ @param sock the socket to close]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility class for I/O-related functionality.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils -->
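+ <!-- A minimal copyBytes sketch (illustrative only; the file names are
+      hypothetical). The boolean argument asks copyBytes to close both
+      streams in its finally clause.
+
+      import java.io.*;
+      import org.apache.hadoop.io.IOUtils;
+
+      InputStream in = new FileInputStream("input.bin");
+      OutputStream out = new FileOutputStream("output.bin");
+      IOUtils.copyBytes(in, out, 4096, true);   // 4 KB buffer, close at end
+ -->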
+ <!-- start class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <class name="IOUtils.NullOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils.NullOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[/dev/null of OutputStreams.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <!-- start class org.apache.hadoop.io.LongWritable -->
+ <class name="LongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="LongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a LongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two LongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable -->
+ <!-- start class org.apache.hadoop.io.LongWritable.Comparator -->
+ <class name="LongWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <class name="LongWritable.DecreasingComparator" extends="org.apache.hadoop.io.LongWritable.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.DecreasingComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A decreasing Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <!-- start class org.apache.hadoop.io.MapFile -->
+ <class name="MapFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="oldName" type="java.lang.String"/>
+ <param name="newName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames an existing map directory.]]>
+ </doc>
+ </method>
+ <method name="delete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deletes the named map file.]]>
+ </doc>
+ </method>
+ <method name="fix" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valueClass" type="java.lang.Class"/>
+ <param name="dryrun" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This method attempts to fix a corrupt MapFile by re-creating its index.
+ @param fs filesystem
+ @param dir directory containing the MapFile data and index
+ @param keyClass key class (has to be a subclass of Writable)
+ @param valueClass value class (has to be a subclass of Writable)
+ @param dryrun do not perform any changes, just report what needs to be done
+ @return number of valid entries in this MapFile, or -1 if no fixing was needed
+ @throws Exception]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="INDEX_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the index file.]]>
+ </doc>
+ </field>
+ <field name="DATA_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the data file.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A file-based map from keys to values.
+
+ <p>A map is a directory containing two files, the <code>data</code> file,
+ containing all keys and values in the map, and a smaller <code>index</code>
+ file, containing a fraction of the keys. The fraction is determined by
+ {@link Writer#getIndexInterval()}.
+
+ <p>The index file is read entirely into memory. Thus key implementations
+ should try to keep themselves small.
+
+ <p>Map files are created by adding entries in-order. To maintain a large
+ database, perform updates by copying the previous version of a database and
+ merging in a sorted change list, to create a new version of the database in
+ a new file. Sorting large change lists can be done with {@link
+ SequenceFile.Sorter}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile -->
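+ <!-- A minimal MapFile write/read sketch (illustrative only; the directory
+      name "example.map" is hypothetical). Entries must be appended in
+      increasing key order, and get() uses the in-memory index to seek.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.io.*;
+
+      Configuration conf = new Configuration();
+      FileSystem fs = FileSystem.getLocal(conf);
+
+      MapFile.Writer writer =
+          new MapFile.Writer(conf, fs, "example.map",
+                             IntWritable.class, Text.class);
+      writer.append(new IntWritable(1), new Text("one"));  // in key order
+      writer.append(new IntWritable(2), new Text("two"));
+      writer.close();
+
+      MapFile.Reader reader = new MapFile.Reader(fs, "example.map", conf);
+      Text value = new Text();
+      reader.get(new IntWritable(2), value);   // random access via the index
+      reader.close();
+ -->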
+ <!-- start class org.apache.hadoop.io.MapFile.Reader -->
+ <class name="MapFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map using the named comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Hook to allow subclasses to defer opening streams until further
+ initialization is complete.
+ @see #createDataFileReader(FileSystem, Path, Configuration)]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="open"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dirName" type="java.lang.String"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDataFileReader" return="org.apache.hadoop.io.SequenceFile.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dataFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link SequenceFile.Reader} returned.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Re-positions the reader before its first key.]]>
+ </doc>
+ </method>
+ <method name="midKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the key at approximately the middle of the file.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalKey"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the final key from the file.
+
+ @param key key to read into]]>
+ </doc>
+ </method>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader at the named key or, if no such key exists, at the
+ first entry after the named key. Returns true iff the named key exists
+ in this map.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the map into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ the end of the map.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the value for the named key, or null if none exists.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+ Returns <code>key</code> or, if it does not exist, the first entry
+ after the named key.
+
+ @param key the key that we're trying to find
+ @param val the data value if the key is found
+ @return the key that was the closest match, or null if eof.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <param name="before" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+
+ @param key the key that we're trying to find
+ @param val the data value if the key is found
+ @param before if true, and <code>key</code> does not exist, return
+ the first entry that falls just before the <code>key</code>; otherwise,
+ return the record that sorts just after.
+ @return the key that was the closest match, or null if eof.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Reader -->
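+ <!-- A minimal getClosest sketch (illustrative only), continuing from the
+      reader opened in the MapFile sketch above. With before=true a missing
+      key falls back to the entry just before it.
+
+      IntWritable probe = new IntWritable(3);   // not present in the map
+      Text value = new Text();
+      WritableComparable hit = reader.getClosest(probe, value, true);
+      // hit is the key actually found, or null at eof
+ -->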
+ <!-- start class org.apache.hadoop.io.MapFile.Writer -->
+ <class name="MapFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <method name="getIndexInterval" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of entries that are added before an index entry is added.]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval and stores it in <code>conf</code>.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair to the map. The key must be greater than or equal
+ to the previous key added to the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writes a new map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Writer -->
+ <!-- start class org.apache.hadoop.io.MapWritable -->
+ <class name="MapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Map&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapWritable" type="org.apache.hadoop.io.MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.Writable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable Map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapWritable -->
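+ <!-- A minimal MapWritable sketch (illustrative only). Keys and values must
+      both be Writables; entries survive a write()/readFields() round trip.
+
+      import org.apache.hadoop.io.*;
+
+      MapWritable map = new MapWritable();
+      map.put(new Text("count"), new IntWritable(7));
+      IntWritable count = (IntWritable) map.get(new Text("count"));
+ -->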
+ <!-- start class org.apache.hadoop.io.MD5Hash -->
+ <class name="MD5Hash" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash from a hex string.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash with a specified value.]]>
+ </doc>
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs, reads and returns an instance.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Copy the contents of another instance into this instance.]]>
+ </doc>
+ </method>
+ <method name="getDigest" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the digest bytes.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="halfDigest" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a half-sized version of this MD5 that fits in a long.]]>
+ </doc>
+ </method>
+ <method name="quarterDigest" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a 32-bit digest of the MD5.
+ @return the first 4 bytes of the MD5]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an MD5Hash whose digest contains the
+ same values.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for this object.
+ Only uses the first 4 bytes, since MD5 digests are evenly distributed.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares this object with the specified object for order.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="setDigest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the digest value from a hex string.]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Writable for MD5 hash values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash -->
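+ <!-- A minimal MD5Hash sketch (illustrative only). toString() yields the
+      hex form, which the String constructor accepts back.
+
+      import org.apache.hadoop.io.MD5Hash;
+
+      MD5Hash h1 = MD5Hash.digest("hello");        // hash of a String
+      MD5Hash h2 = new MD5Hash(h1.toString());     // rebuilt from hex
+      // h1.equals(h2) is true
+ -->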
+ <!-- start class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <class name="MD5Hash.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5Hash.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for MD5Hash keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <!-- start class org.apache.hadoop.io.MultipleIOException -->
+ <class name="MultipleIOException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getExceptions" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the underlying exceptions]]>
+ </doc>
+ </method>
+ <method name="createIOException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="exceptions" type="java.util.List&lt;java.io.IOException&gt;"/>
+ <doc>
+ <![CDATA[A convenient method to create an {@link IOException}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Encapsulates a list of {@link IOException}s in a single {@link IOException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MultipleIOException -->
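+ <!-- A minimal MultipleIOException sketch (illustrative only), e.g. for
+      collecting failures while closing several streams and rethrowing once.
+
+      import java.io.IOException;
+      import java.util.ArrayList;
+      import java.util.List;
+      import org.apache.hadoop.io.MultipleIOException;
+
+      List<IOException> errors = new ArrayList<IOException>();
+      errors.add(new IOException("first failure"));
+      errors.add(new IOException("second failure"));
+      IOException combined = MultipleIOException.createIOException(errors);
+      // throw combined;
+ -->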
+ <!-- start class org.apache.hadoop.io.NullWritable -->
+ <class name="NullWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <method name="get" return="org.apache.hadoop.io.NullWritable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the single instance of this class.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Singleton Writable with no data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable -->
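+ <!-- A one-line NullWritable sketch (illustrative only): the singleton is
+      handy wherever a key or value is required but carries no data.
+
+      import org.apache.hadoop.io.NullWritable;
+
+      NullWritable nothing = NullWritable.get();   // always the same instance
+ -->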
+ <!-- start class org.apache.hadoop.io.ObjectWritable -->
+ <class name="ObjectWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ObjectWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Class, java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the instance, or null if none.]]>
+ </doc>
+ </method>
+ <method name="getDeclaredClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the class this is meant to be.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Reset the instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeObject"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="instance" type="java.lang.Object"/>
+ <param name="declaredClass" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="objectWritable" type="org.apache.hadoop.io.ObjectWritable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A polymorphic Writable that writes an instance with its class name.
+ Handles arrays, strings and primitive types without a Writable wrapper.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ObjectWritable -->
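+ <!-- A minimal ObjectWritable round trip (illustrative only; IOException
+      handling omitted). Note that the class name is written with every
+      instance, which is the overhead GenericWritable avoids.
+
+      import java.io.*;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.ObjectWritable;
+
+      Configuration conf = new Configuration();
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      ObjectWritable.writeObject(new DataOutputStream(bytes),
+                                 "a string", String.class, conf);
+      Object back = ObjectWritable.readObject(new DataInputStream(
+          new ByteArrayInputStream(bytes.toByteArray())), conf);
+ -->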
+ <!-- start class org.apache.hadoop.io.OutputBuffer -->
+ <class name="OutputBuffer" extends="java.io.FilterOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.OutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from an InputStream directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link OutputStream} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new OutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ OutputBuffer buffer = new OutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using OutputStream methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>
+ @see DataOutputBuffer
+ @see InputBuffer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.OutputBuffer -->
+ <!-- start interface org.apache.hadoop.io.RawComparator -->
+ <interface name="RawComparator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Comparator&lt;T&gt;"/>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link Comparator} that operates directly on byte representations of
+ objects.
+ </p>
+ @param <T>
+ @see DeserializerComparator]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.RawComparator -->
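+ <!-- A minimal RawComparator sketch (illustrative only): comparing
+      serialized IntWritables without deserializing them. In practice
+      IntWritable.Comparator already provides this; the sketch assumes
+      WritableComparator.readInt for decoding the raw bytes.
+
+      import org.apache.hadoop.io.IntWritable;
+      import org.apache.hadoop.io.RawComparator;
+      import org.apache.hadoop.io.WritableComparator;
+
+      public class RawIntComparator implements RawComparator<IntWritable> {
+        public int compare(byte[] b1, int s1, int l1,
+                           byte[] b2, int s2, int l2) {
+          int a = WritableComparator.readInt(b1, s1);
+          int b = WritableComparator.readInt(b2, s2);
+          return (a < b) ? -1 : (a == b ? 0 : 1);
+        }
+        public int compare(IntWritable a, IntWritable b) {  // Comparator<T>
+          return a.compareTo(b);
+        }
+      }
+ -->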
+ <!-- start class org.apache.hadoop.io.SequenceFile -->
+ <class name="SequenceFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()}
+ to get {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the compression type for the reduce outputs
+ @param job the job config to look in
+ @return the kind of compression to use
+ @deprecated Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()}
+ to get {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use the one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the compression type for sequence files.
+ @param job the configuration to modify
+ @param val the new compression type (none, block, record)
+ @deprecated Use one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param bufferSize buffer size for the underlying output stream.
+ @param replication replication factor for the file.
+ @param blockSize block size for the file.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top of which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top of which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="SYNC_INTERVAL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes between sync points.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<code>SequenceFile</code>s are flat files consisting of binary key/value
+ pairs.
+
+ <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
+ {@link Sorter} classes for writing, reading and sorting respectively.</p>
+
+ There are three <code>SequenceFile</code> <code>Writer</code>s based on the
+ {@link CompressionType} used to compress key/value pairs:
+ <ol>
+ <li>
+ <code>Writer</code> : Uncompressed records.
+ </li>
+ <li>
+ <code>RecordCompressWriter</code> : Record-compressed files, only compress
+ values.
+ </li>
+ <li>
+ <code>BlockCompressWriter</code> : Block-compressed files, both keys &
+ values are collected in 'blocks'
+ separately and compressed. The size of
+ the 'block' is configurable.
+ </li>
+ </ol>
+
+ <p>The actual compression algorithm used to compress key and/or values can be
+ specified by using the appropriate {@link CompressionCodec}.</p>
+
+ <p>The recommended way is to use the static <tt>createWriter</tt> methods
+ provided by <code>SequenceFile</code> to choose the preferred format.</p>
+
+ <p>The {@link Reader} acts as the bridge and can read any of the above
+ <code>SequenceFile</code> formats.</p>
+
+ <h4 id="Formats">SequenceFile Formats</h4>
+
+ <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
+ depending on the <code>CompressionType</code> specified. All of them share a
+ <a href="#Header">common header</a> described below.
+
+ <h5 id="Header">SequenceFile Header</h5>
+ <ul>
+ <li>
+ version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual
+ version number (e.g. SEQ4 or SEQ6)
+ </li>
+ <li>
+ keyClassName - key class
+ </li>
+ <li>
+ valueClassName - value class
+ </li>
+ <li>
+ compression - A boolean which specifies if compression is turned on for
+ keys/values in this file.
+ </li>
+ <li>
+ blockCompression - A boolean which specifies if block-compression is
+ turned on for keys/values in this file.
+ </li>
+ <li>
+ compression codec - <code>CompressionCodec</code> class which is used for
+ compression of keys and/or values (if compression is
+ enabled).
+ </li>
+ <li>
+ metadata - {@link Metadata} for this file.
+ </li>
+ <li>
+ sync - A sync marker to denote end of the header.
+ </li>
+ </ul>
+
+ <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li>Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li><i>Compressed</i> Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record <i>Block</i>
+ <ul>
+ <li>Compressed key-lengths block-size</li>
+ <li>Compressed key-lengths block</li>
+ <li>Compressed keys block-size</li>
+ <li>Compressed keys block</li>
+ <li>Compressed value-lengths block-size</li>
+ <li>Compressed value-lengths block</li>
+ <li>Compressed values block-size</li>
+ <li>Compressed values block</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <p>The compressed blocks of key lengths and value lengths consist of the
+ actual lengths of individual keys/values encoded in ZeroCompressedInteger
+ format.</p>
+
+ @see CompressionCodec]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile -->
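+ <!-- Usage sketch: a minimal write/read round trip with the createWriter
+      and Reader APIs above, assuming Text keys and IntWritable values; the
+      path "data.seq" is illustrative.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.io.IntWritable;
+      import org.apache.hadoop.io.SequenceFile;
+      import org.apache.hadoop.io.Text;
+
+      public class SequenceFileRoundTrip {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.get(conf);
+          Path path = new Path("data.seq");
+
+          SequenceFile.Writer writer = SequenceFile.createWriter(
+              fs, conf, path, Text.class, IntWritable.class);
+          writer.append(new Text("apple"), new IntWritable(3));
+          writer.append(new Text("pear"), new IntWritable(7));
+          writer.close();
+
+          SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
+          Text key = new Text();
+          IntWritable val = new IntWritable();
+          while (reader.next(key, val)) {       // false at end of file
+            System.out.println(key + "\t" + val);
+          }
+          reader.close();
+        }
+      }
+ -->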
+ <!-- start class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <class name="SequenceFile.CompressionType" extends="java.lang.Enum&lt;org.apache.hadoop.io.SequenceFile.CompressionType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.SequenceFile.CompressionType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression type used to compress key/value pairs in the
+ {@link SequenceFile}.
+
+ @see SequenceFile.Writer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.CompressionType -->
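+ <!-- Usage sketch: selecting a CompressionType at write time via one of the
+      createWriter overloads above. BLOCK compression with the DefaultCodec
+      is shown; the path, codec choice, and sample record are illustrative.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.io.IntWritable;
+      import org.apache.hadoop.io.SequenceFile;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.io.compress.DefaultCodec;
+
+      public class BlockCompressedWriter {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.get(conf);
+          SequenceFile.Writer writer = SequenceFile.createWriter(
+              fs, conf, new Path("blocks.seq"), Text.class, IntWritable.class,
+              SequenceFile.CompressionType.BLOCK, new DefaultCodec());
+          writer.append(new Text("a"), new IntWritable(1));
+          writer.close();
+        }
+      }
+ -->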
+ <!-- start class org.apache.hadoop.io.SequenceFile.Metadata -->
+ <class name="SequenceFile.Metadata" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFile.Metadata" type="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="getMetadata" return="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The class encapsulating the metadata of a file.
+ The metadata of a file is a list of attribute name/value
+ pairs of Text type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Metadata -->
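+ <!-- Usage sketch: attaching file-level metadata at creation time through
+      the createWriter overload that takes a Metadata argument, then reading
+      it back. fs, conf and path are as in the round-trip sketch above; the
+      attribute name/value are illustrative, and passing a null Progressable
+      here is an assumption.
+
+      SequenceFile.Metadata meta = new SequenceFile.Metadata();
+      meta.set(new Text("created.by"), new Text("example"));
+      SequenceFile.Writer writer = SequenceFile.createWriter(
+          fs, conf, path, Text.class, IntWritable.class,
+          SequenceFile.CompressionType.RECORD,
+          new org.apache.hadoop.io.compress.DefaultCodec(),
+          null /* no Progressable */, meta);
+      writer.close();
+
+      SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
+      Text createdBy = reader.getMetadata().get(new Text("created.by"));
+      reader.close();
+ -->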
+ <!-- start class org.apache.hadoop.io.SequenceFile.Reader -->
+ <class name="SequenceFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Reader" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the named file.]]>
+ </doc>
+ </constructor>
+ <method name="openFile" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link FSDataInputStream} returned.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the key class.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the value class.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="isCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if values are compressed.]]>
+ </doc>
+ </method>
+ <method name="isBlockCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if records are block-compressed.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="getMetadata" return="org.apache.hadoop.io.SequenceFile.Metadata"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the metadata object of the file.]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file into <code>key</code>, skipping its
+ value. Returns true if another entry exists, and false at the end of the file.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the file into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ the end of the file.]]>
+ </doc>
+ </method>
+ <method name="next" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.">
+ <param name="buffer" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.]]>
+ </doc>
+ </method>
+ <method name="createValueBytes" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRaw" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' records.
+ @param key - The buffer into which the key is read
+ @param val - The 'raw' value
+ @return Returns the total record length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawKey" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' keys.
+ @param key - The buffer into which the key is read
+ @return Returns the key length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' values.
+ @param val - The 'raw' value
+ @return Returns the value length
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the current byte position in the input file.
+
+ <p>The position passed must be a position returned by {@link
+ SequenceFile.Writer#getLength()} when writing this file. To seek to an arbitrary
+ position, use {@link SequenceFile.Reader#sync(long)}.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the next sync mark past a given position.]]>
+ </doc>
+ </method>
+ <method name="syncSeen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true iff the previous call to next passed a sync mark.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current byte position in the input file.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reads key/value pairs from a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Reader -->
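+ <!-- Usage sketch: reading a SequenceFile without hard-coding the key and
+      value types, instantiating them reflectively from the file header;
+      "data.seq" is illustrative.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.io.SequenceFile;
+      import org.apache.hadoop.io.Writable;
+      import org.apache.hadoop.util.ReflectionUtils;
+
+      public class GenericSeqFileDump {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.get(conf);
+          SequenceFile.Reader reader =
+              new SequenceFile.Reader(fs, new Path("data.seq"), conf);
+          Writable key = (Writable)
+              ReflectionUtils.newInstance(reader.getKeyClass(), conf);
+          Writable val = (Writable)
+              ReflectionUtils.newInstance(reader.getValueClass(), conf);
+          while (reader.next(key, val)) {
+            System.out.println(key + "\t" + val);
+          }
+          reader.close();
+        }
+      }
+ -->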
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter -->
+ <class name="SequenceFile.Sorter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge files containing the named classes.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.io.RawComparator, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge using an arbitrary {@link RawComparator}.]]>
+ </doc>
+ </constructor>
+ <method name="setFactor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="factor" type="int"/>
+ <doc>
+ <![CDATA[Set the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="getFactor" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="setMemory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="memory" type="int"/>
+ <doc>
+ <![CDATA[Set the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="getMemory" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setProgressable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progressable" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Set the progressable object in order to report progress.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files into an output file.
+ @param inFiles the files to be sorted
+ @param outFile the sorted output file
+ @param deleteInput should the input files be deleted as they are read?]]>
+ </doc>
+ </method>
+ <method name="sortAndIterate" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files and return an iterator.
+ @param inFiles the files to be sorted
+ @param tempDir the directory where temp files are created during sort
+ @param deleteInput should the input files be deleted as they are read?
+ @return iterator the RawKeyValueIterator]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The backwards compatible interface to sort.
+ @param inFile the input file to sort
+ @param outFile the sorted output file]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="segments" type="java.util.List&lt;org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor&gt;"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the list of segments of type <code>SegmentDescriptor</code>
+ @param segments the list of SegmentDescriptors
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[] using a max factor value
+ that is already set.
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param tmpDir the directory to write temporary files into
+ @return the RawKeyValueIterator over the merged records
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="factor" type="int"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[]
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param factor the factor that will be used as the maximum merge fan-in
+ @param tmpDir the directory to write temporary files into
+ @return the RawKeyValueIterator over the merged records
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInputs" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[]
+ @param inNames the array of path names
+ @param tempDir the directory for creating temp files during merge
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @return the RawKeyValueIterator over the merged records
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cloneFileAttributes" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="prog" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clones the attributes (like compression) of the input file and creates a
+ corresponding Writer.
+ @param inputFile the path of the input file whose attributes should be
+ cloned
+ @param outputFile the path of the output file
+ @param prog the Progressable to report status during the file write
+ @return Writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="records" type="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"/>
+ <param name="writer" type="org.apache.hadoop.io.SequenceFile.Writer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes records from RawKeyValueIterator into a file represented by the
+ passed writer
+ @param records the RawKeyValueIterator
+ @param writer the Writer created earlier
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merge the provided files.
+ @param inFiles the array of input path names
+ @param outFile the final output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sorts key/value pairs in a sequence-format file.
+
+ <p>For best performance, applications should make sure that the {@link
+ Writable#readFields(DataInput)} implementation of their keys is
+ very efficient. In particular, it should avoid allocating memory.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter -->
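+ <!-- Usage sketch: sorting one SequenceFile into another with the Sorter
+      API above, assuming Text keys and IntWritable values; the paths and
+      the merge factor are illustrative.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.io.IntWritable;
+      import org.apache.hadoop.io.SequenceFile;
+      import org.apache.hadoop.io.Text;
+
+      public class SeqFileSort {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.get(conf);
+          SequenceFile.Sorter sorter =
+              new SequenceFile.Sorter(fs, Text.class, IntWritable.class, conf);
+          sorter.setFactor(10);   // merge at most 10 streams at once
+          sorter.sort(new Path[] { new Path("data.seq") },
+                      new Path("sorted.seq"), false /* keep inputs */);
+        }
+      }
+ -->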
+ <!-- start interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
+ <interface name="SequenceFile.Sorter.RawKeyValueIterator" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw key.
+ @return DataOutputBuffer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getValue" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw value.
+ @return ValueBytes
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets up the current key and value (for getKey and getValue)
+ @return true if there exists a key/value, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the iterator so that the underlying streams can be closed.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the Progress object; this holds a float (0.0 - 1.0)
+ indicating the fraction of the bytes processed by the iterator so far.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to iterate over raw keys/values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
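+ <!-- Usage sketch: merging pre-sorted files and streaming the merged
+      records back out through the iterator, combining Sorter.merge,
+      cloneFileAttributes and writeFile from above. fs and conf are as in
+      the earlier sketches; the paths are illustrative and passing a null
+      Progressable to cloneFileAttributes is an assumption.
+
+      SequenceFile.Sorter sorter =
+          new SequenceFile.Sorter(fs, Text.class, IntWritable.class, conf);
+      SequenceFile.Sorter.RawKeyValueIterator records =
+          sorter.merge(new Path[] { new Path("a.seq"), new Path("b.seq") },
+                       false /* keep inputs */, new Path("/tmp"));
+      SequenceFile.Writer out = sorter.cloneFileAttributes(
+          new Path("a.seq"), new Path("merged.seq"), null);
+      sorter.writeFile(records, out);
+      out.close();
+ -->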
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <class name="SequenceFile.Sorter.SegmentDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="SequenceFile.Sorter.SegmentDescriptor" type="long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a segment
+ @param segmentOffset the offset of the segment in the file
+ @param segmentLength the length of the segment
+ @param segmentPathName the path name of the file containing the segment]]>
+ </doc>
+ </constructor>
+ <method name="doSync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Performs the sync checks.]]>
+ </doc>
+ </method>
+ <method name="preserveInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="preserve" type="boolean"/>
+ <doc>
+ <![CDATA[Sets whether the input file should be preserved (i.e. not deleted) when it is no longer needed.]]>
+ </doc>
+ </method>
+ <method name="shouldPreserveInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRawKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the rawKey object with the key returned by the Reader.
+ @return true if there is a key returned; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rawValue" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the passed rawValue with the value corresponding to the key
+ read earlier.
+ @param rawValue the ValueBytes object to fill
+ @return the length of the value
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the stored rawKey.]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The default cleanup. Subclasses can override this with a custom
+ cleanup.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class defines a merge segment. It can be subclassed to
+ provide a customized cleanup method implementation; in the default
+ implementation, cleanup closes the file handle and deletes the file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <!-- start interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
+ <interface name="SequenceFile.ValueBytes" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the uncompressed bytes to the outStream.
+ @param outStream : Stream to write uncompressed bytes into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to outStream.
+ Note that it will NOT compress the bytes if they are not already compressed.
+ @param outStream : Stream to write compressed bytes into.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Size of stored data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to 'raw' values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
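+ <!-- Usage sketch: the raw-record path. nextRaw fills a DataOutputBuffer
+      with the serialized key and a ValueBytes with the value, which can
+      then be forwarded without deserializing either. fs, conf and path are
+      as in the round-trip sketch above; the sink stream is illustrative.
+
+      import java.io.DataOutputStream;
+      import java.io.FileOutputStream;
+      import org.apache.hadoop.io.DataOutputBuffer;
+      import org.apache.hadoop.io.SequenceFile;
+
+      SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
+      DataOutputBuffer rawKey = new DataOutputBuffer();
+      SequenceFile.ValueBytes rawVal = reader.createValueBytes();
+      DataOutputStream sink =
+          new DataOutputStream(new FileOutputStream("raw.bin"));
+      while (reader.nextRaw(rawKey, rawVal) >= 0) {  // -1 at end of file
+        sink.write(rawKey.getData(), 0, rawKey.getLength());
+        rawVal.writeUncompressedBytes(sink);  // decompresses if necessary
+        rawKey.reset();
+      }
+      sink.close();
+      reader.close();
+ -->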
+ <!-- start class org.apache.hadoop.io.SequenceFile.Writer -->
+ <class name="SequenceFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, int, short, long, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a sync point.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="appendRaw"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keyData" type="byte[]"/>
+ <param name="keyOffset" type="int"/>
+ <param name="keyLength" type="int"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current length of the output file.
+
+ <p>This always returns a synchronized position. In other words,
+ immediately after calling {@link SequenceFile.Reader#seek(long)} with a position
+ returned by this method, {@link SequenceFile.Reader#next(Writable)} may be called. However,
+ the key may be earlier in the file than the key last written when this
+ method was called (e.g., with block-compression, it may be the first key
+ in the block that was being written when this method was called).]]>
+ </doc>
+ </method>
+ <field name="keySerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="uncompressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="compressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Write key/value pairs to a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Writer -->
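+ <!-- Usage sketch: recording a seekable position while writing, per the
+      getLength() contract above, then resuming a Reader from it. fs, conf
+      and path are as in the round-trip sketch; the records are
+      illustrative.
+
+      SequenceFile.Writer writer = SequenceFile.createWriter(
+          fs, conf, path, Text.class, IntWritable.class);
+      writer.append(new Text("a"), new IntWritable(1));
+      writer.sync();                  // force a sync point
+      long mark = writer.getLength(); // always a synchronized position
+      writer.append(new Text("b"), new IntWritable(2));
+      writer.close();
+
+      SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
+      reader.seek(mark);              // legal: mark came from getLength()
+      Text key = new Text();
+      IntWritable val = new IntWritable();
+      while (reader.next(key, val)) { // continues from the recorded position
+        System.out.println(key + "\t" + val);
+      }
+      reader.close();
+ -->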
+ <!-- start class org.apache.hadoop.io.SetFile -->
+ <class name="SetFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A file-based set of keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile -->
+ <!-- start class org.apache.hadoop.io.SetFile.Reader -->
+ <class name="SetFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set using the named comparator.]]>
+ </doc>
+ </constructor>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in a set into <code>key</code>. Returns
+ true if such a key exists and false when at the end of the set.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the matching key from a set into <code>key</code>.
+ Returns <code>key</code>, or null if no match exists.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Reader -->
+ <!-- start class org.apache.hadoop.io.SetFile.Writer -->
+ <class name="SetFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="pass a Configuration too">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named set for keys of the named class.
+ @deprecated pass a Configuration too]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element class and compression type.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element comparator and compression type.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key to a set. The key must be strictly greater than the
+ previous key added to the set.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Writer -->
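+ <!-- Editor's note: a minimal SetFile round trip, sketching the strictly
+      increasing key requirement of Writer.append (hypothetical fs, conf,
+      and directory name):
+
+      SetFile.Writer writer = new SetFile.Writer(
+          conf, fs, "parts.set", Text.class, SequenceFile.CompressionType.NONE);
+      writer.append(new Text("alpha"));   // keys must be strictly increasing
+      writer.append(new Text("beta"));
+      writer.close();
+
+      SetFile.Reader reader = new SetFile.Reader(fs, "parts.set", conf);
+      Text key = new Text();
+      while (reader.next(key)) {          // iterates keys in sorted order
+        System.out.println(key);
+      }
+      reader.close();
+ -->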
+ <!-- start class org.apache.hadoop.io.SortedMapWritable -->
+ <class name="SortedMapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="SortedMapWritable" type="org.apache.hadoop.io.SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="comparator" return="java.util.Comparator&lt;? super org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="firstKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="headMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="lastKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="subMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="tailMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.WritableComparable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable SortedMap.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SortedMapWritable -->
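+ <!-- Editor's note: a small usage sketch; SortedMapWritable behaves like a
+      sorted java.util.Map whose keys and values are Writables, and the map
+      itself can be serialized with write/readFields:
+
+      SortedMapWritable map = new SortedMapWritable();
+      map.put(new Text("c"), new IntWritable(3));
+      map.put(new Text("a"), new IntWritable(1));
+      map.put(new Text("b"), new IntWritable(2));
+      // iteration follows key order: a, b, c
+      System.out.println(map.firstKey() + " .. " + map.lastKey());
+ -->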
+ <!-- start interface org.apache.hadoop.io.Stringifier -->
+ <interface name="Stringifier" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Converts the object to a string representation.
+ @param obj the object to convert
+ @return the string representation of the object
+ @throws IOException if the object cannot be converted]]>
+ </doc>
+ </method>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from its string representation.
+ @param str the string representation of the object
+ @return restored object
+ @throws IOException if the object cannot be restored]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes this object.
+ @throws IOException if an I/O error occurs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stringifier interface offers two methods to convert an object
+ to a string representation and restore the object given its
+ string representation.
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Stringifier -->
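+ <!-- Editor's note: a minimal sketch of implementing the interface above for
+      IntWritable (a hypothetical helper, not shipped with Hadoop):
+
+      public class IntWritableStringifier implements Stringifier<IntWritable> {
+        public String toString(IntWritable obj) throws IOException {
+          return Integer.toString(obj.get());
+        }
+        public IntWritable fromString(String str) throws IOException {
+          try {
+            return new IntWritable(Integer.parseInt(str));
+          } catch (NumberFormatException e) {
+            throw new IOException("cannot restore: " + str);
+          }
+        }
+        public void close() throws IOException {
+          // no resources to release
+        }
+      }
+ -->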
+ <!-- start class org.apache.hadoop.io.Text -->
+ <class name="Text" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Text" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a string.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="org.apache.hadoop.io.Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another text.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a byte array.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the raw bytes.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of bytes in the byte array.]]>
+ </doc>
+ </method>
+ <method name="charAt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="int"/>
+ <doc>
+ <![CDATA[Returns the Unicode Scalar Value (32-bit integer value)
+ for the character at <code>position</code>. Note that this
+ method avoids using the converter or doing String instantiation.
+ @return the Unicode scalar value at position or -1
+ if the position is invalid or points to a
+ trailing byte]]>
+ </doc>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Finds any occurrence of <code>what</code> in the backing
+ buffer, starting at position <code>start</code>. The starting
+ position is measured in bytes and the return value is in
+ terms of byte position in the buffer. The backing buffer is
+ not converted to a string for this operation.
+ @return byte position of the first occurrence of the search
+ string in the UTF-8 buffer or -1 if not found]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <doc>
+ <![CDATA[Set to a UTF-8 byte array.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[Copy a text.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Set the Text to a range of bytes.
+ @param utf8 the data to copy from
+ @param start the first position of the new string
+ @param len the number of bytes of the new string]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Append a range of bytes to the end of the given text.
+ @param utf8 the data to copy from
+ @param start the first position to append from utf8
+ @param len the number of bytes to append]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clear the string to empty.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert text back to a string.
+ @see java.lang.Object#toString()]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize from <code>in</code>.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one Text in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize: write this object to <code>out</code>. The
+ length is written using zero-compressed encoding.
+ @see Writable#write(DataOutput)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare two Texts bytewise using standard UTF8 ordering.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a Text with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Hash function.]]>
+ </doc>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If the input is malformed,
+ it is replaced by a default value.]]>
+ </doc>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If the input is malformed,
+ invalid chars are replaced by a default value.
+ @return ByteBuffer: the bytes are stored at ByteBuffer.array()
+ and the length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.
+ @return ByteBuffer: the bytes are stored at ByteBuffer.array()
+ and the length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string from <code>in</code>.]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF-8 encoded string to <code>out</code>.]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check if a byte array contains valid UTF-8.
+ @param utf8 byte array
+ @throws MalformedInputException if the byte array contains invalid UTF-8]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check to see if a byte array is valid UTF-8.
+ @param utf8 the array of bytes
+ @param start the offset of the first byte in the array
+ @param len the length of the byte sequence
+ @throws MalformedInputException if the byte array contains invalid bytes]]>
+ </doc>
+ </method>
+ <method name="bytesToCodePoint" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="java.nio.ByteBuffer"/>
+ <doc>
+ <![CDATA[Returns the next code point at the current position in
+ the buffer. The buffer's position will be incremented.
+ Any mark set on this buffer will be changed by this method!]]>
+ </doc>
+ </method>
+ <method name="utf8Length" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[For the given string, returns the number of UTF-8 bytes
+ required to encode the string.
+ @param string text to encode
+ @return number of UTF-8 bytes required to encode]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class stores text using standard UTF8 encoding. It provides methods
+ to serialize, deserialize, and compare texts at byte level. The type of
+ length is integer and is serialized using zero-compressed format. <p>In
+ addition, it provides methods for string traversal without converting the
+ byte array to a string. <p>Also includes utilities for
+ serializing/deserializing a string, coding/decoding a string, checking if a
+ byte array contains valid UTF8 code, and calculating the length of an encoded
+ string.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text -->
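+ <!-- Editor's note: a short sketch of the byte-level accessors documented
+      above; note that positions and lengths are in bytes, not characters:
+
+      Text t = new Text("hadoop");
+      int len = t.getLength();    // bytes in the UTF-8 encoding (6 here)
+      int pos = t.find("do");     // byte offset of first occurrence, or -1
+      int cp  = t.charAt(2);      // Unicode scalar value at byte position 2
+      String s = t.toString();    // decode back to a java.lang.String
+ -->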
+ <!-- start class org.apache.hadoop.io.Text.Comparator -->
+ <class name="Text.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Text.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for Text keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text.Comparator -->
+ <!-- start class org.apache.hadoop.io.TwoDArrayWritable -->
+ <class name="TwoDArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[][]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[][]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for 2D arrays containing a matrix of instances of a class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.TwoDArrayWritable -->
+ <!-- start class org.apache.hadoop.io.UTF8 -->
+ <class name="UTF8" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="replaced by Text">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UTF8" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <constructor name="UTF8" type="org.apache.hadoop.io.UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another UTF8.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw bytes.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the encoded string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Set to contain the contents of another UTF8.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one UTF8 in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare two UTF8s.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert to a String.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a UTF8 with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convert a string to a UTF-8 encoded byte array.
+ @see String#getBytes(String)]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string.
+
+ @see DataInput#readUTF()]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF-8 encoded string.
+
+ @see DataOutput#writeUTF(String)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for strings that uses the UTF8 encoding.
+
+ <p>Also includes utilities for efficiently reading and writing UTF-8.
+
+ @deprecated replaced by Text]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8 -->
+ <!-- start class org.apache.hadoop.io.UTF8.Comparator -->
+ <class name="UTF8.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UTF8.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8.Comparator -->
+ <!-- start class org.apache.hadoop.io.VersionedWritable -->
+ <class name="VersionedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="VersionedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="byte"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the version number of the current implementation.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for Writables that provides version checking.
+
+ <p>This is useful when a class may evolve, so that instances written by the
+ old version of the class may still be processed by the new version. To
+ handle this situation, {@link #readFields(DataInput)}
+ implementations should catch {@link VersionMismatchException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionedWritable -->
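+ <!-- Editor's note: a minimal subclass sketch (hypothetical class MyRecord);
+      super.write emits the version byte and super.readFields verifies it,
+      throwing VersionMismatchException on disagreement:
+
+      public class MyRecord extends VersionedWritable {
+        private static final byte VERSION = 2;
+        private int payload;
+        public byte getVersion() { return VERSION; }
+        public void write(DataOutput out) throws IOException {
+          super.write(out);        // writes the version byte first
+          out.writeInt(payload);
+        }
+        public void readFields(DataInput in) throws IOException {
+          super.readFields(in);    // checks the version byte
+          payload = in.readInt();
+        }
+      }
+ -->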
+ <!-- start class org.apache.hadoop.io.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="byte, byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Thrown by {@link VersionedWritable#readFields(DataInput)} when the
+ version of an object being read does not match the current implementation
+ version as returned by {@link VersionedWritable#getVersion()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionMismatchException -->
+ <!-- start class org.apache.hadoop.io.VIntWritable -->
+ <class name="VIntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VIntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VIntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VIntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VIntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for integer values stored in variable-length format.
+ Such values take between one and five bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVInt(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VIntWritable -->
+ <!-- start class org.apache.hadoop.io.VLongWritable -->
+ <class name="VLongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VLongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VLongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VLongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VLongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs in a variable-length format. Such values take
+ between one and nine bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVLong(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VLongWritable -->
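+ <!-- Editor's note: a quick size check for the variable-length encoding
+      described above (small magnitudes use a single byte; a full long can
+      take up to nine):
+
+      ByteArrayOutputStream buf = new ByteArrayOutputStream();
+      DataOutputStream out = new DataOutputStream(buf);
+      new VLongWritable(42L).write(out);             // 1 byte
+      new VLongWritable(Long.MAX_VALUE).write(out);  // 9 bytes
+      out.flush();
+      System.out.println(buf.size());                // 10
+ -->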
+ <!-- start interface org.apache.hadoop.io.Writable -->
+ <interface name="Writable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the fields of this object to <code>out</code>.
+
+ @param out <code>DataOutput</code> to serialize this object into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the fields of this object from <code>in</code>.
+
+ <p>For efficiency, implementations should attempt to re-use storage in the
+ existing object where possible.</p>
+
+ @param in <code>DataInput</code> to deserialize this object from.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A serializable object which implements a simple, efficient, serialization
+ protocol, based on {@link DataInput} and {@link DataOutput}.
+
+ <p>Any <code>key</code> or <code>value</code> type in the Hadoop Map-Reduce
+ framework implements this interface.</p>
+
+ <p>Implementations typically implement a static <code>read(DataInput)</code>
+ method which constructs a new instance, calls {@link #readFields(DataInput)}
+ and returns the instance.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritable implements Writable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public static MyWritable read(DataInput in) throws IOException {
+ MyWritable w = new MyWritable();
+ w.readFields(in);
+ return w;
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Writable -->
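+ <!-- Editor's note: a round-trip sketch using the MyWritable example from the
+      Javadoc above:
+
+      ByteArrayOutputStream buf = new ByteArrayOutputStream();
+      DataOutputStream out = new DataOutputStream(buf);
+      MyWritable w = new MyWritable();
+      w.write(out);                                  // serialize
+      DataInputStream in = new DataInputStream(
+          new ByteArrayInputStream(buf.toByteArray()));
+      MyWritable copy = MyWritable.read(in);         // deserialize
+ -->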
+ <!-- start interface org.apache.hadoop.io.WritableComparable -->
+ <interface name="WritableComparable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <doc>
+ <![CDATA[A {@link Writable} which is also {@link Comparable}.
+
+ <p><code>WritableComparable</code>s can be compared to each other, typically
+ via <code>Comparator</code>s. Any type which is to be used as a
+ <code>key</code> in the Hadoop Map-Reduce framework should implement this
+ interface.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritableComparable implements WritableComparable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public int compareTo(MyWritableComparable w) {
+ int thisValue = this.counter;
+ int thatValue = w.counter;
+ return (thisValue &lt; thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableComparable -->
+ <!-- start class org.apache.hadoop.io.WritableComparator -->
+ <class name="WritableComparator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator"/>
+ <constructor name="WritableComparator" type="java.lang.Class"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get a comparator for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link WritableComparable}
+ implementation.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the WritableComparable implementation class.]]>
+ </doc>
+ </method>
+ <method name="newKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a new {@link WritableComparable} instance.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Optimization hook. Override this to make SequenceFile.Sorter scream.
+
+ <p>The default implementation reads the data into two {@link
+ WritableComparable}s (using {@link
+ Writable#readFields(DataInput)}), then calls {@link
+ #compare(WritableComparable,WritableComparable)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[Compare two WritableComparables.
+
+ <p> The default implementation uses the natural ordering, calling {@link
+ Comparable#compareTo(Object)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <method name="hashBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Compute hash for binary data.]]>
+ </doc>
+ </method>
+ <method name="readUnsignedShort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an unsigned short from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an integer from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a long from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array with the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator for {@link WritableComparable}s.
+
+ <p>This base implementation uses the natural ordering. To define alternate
+ orderings, override {@link #compare(WritableComparable,WritableComparable)}.
+
+ <p>One may optimize compare-intensive operations by overriding
+ {@link #compare(byte[],int,int,byte[],int,int)}. Static utility methods are
+ provided to assist in optimized implementations of this method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableComparator -->
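+ <!-- Editor's note: a sketch of the raw-bytes optimization hook described
+      above, for a hypothetical key whose serialized form begins with a
+      4-byte int; readInt is the static helper documented above:
+
+      public class MyKeyComparator extends WritableComparator {
+        public MyKeyComparator() { super(MyKey.class); }
+        public int compare(byte[] b1, int s1, int l1,
+                           byte[] b2, int s2, int l2) {
+          int a = readInt(b1, s1);
+          int b = readInt(b2, s2);
+          return a < b ? -1 : (a == b ? 0 : 1);
+        }
+      }
+      // registration, per define() above:
+      // WritableComparator.define(MyKey.class, new MyKeyComparator());
+ -->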
+ <!-- start class org.apache.hadoop.io.WritableFactories -->
+ <class name="WritableFactories" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="factory" type="org.apache.hadoop.io.WritableFactory"/>
+ <doc>
+ <![CDATA[Define a factory for a class.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.io.WritableFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Return the factory defined for a class.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factories for non-public writables. Defining a factory permits {@link
+ ObjectWritable} to construct instances of non-public classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableFactories -->
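+ <!-- Editorial note: a minimal usage sketch of the factory registration described
+      above. PackagePrivateRecord and conf are assumed names for illustration; the
+      factory would be defined in the same package as the non-public class.
+
+        WritableFactories.setFactory(PackagePrivateRecord.class, new WritableFactory() {
+          public Writable newInstance() { return new PackagePrivateRecord(); }
+        });
+        // ObjectWritable (and other generic code) can now instantiate it:
+        Writable w = WritableFactories.newInstance(PackagePrivateRecord.class, conf);
+ -->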
+ <!-- start interface org.apache.hadoop.io.WritableFactory -->
+ <interface name="WritableFactory" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a new instance.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A factory for a class of Writable.
+ @see WritableFactories]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableFactory -->
+ <!-- start class org.apache.hadoop.io.WritableName -->
+ <class name="WritableName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the name that a class should be known as to something other than the
+ class name.]]>
+ </doc>
+ </method>
+ <method name="addName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add an alternate name for a class.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Return the name for a class. Default is {@link Class#getName()}.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the class for a name. Default is {@link Class#forName(String)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility to permit renaming of Writable implementation classes without
+ invalidating files that contain their class name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableName -->
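+ <!-- Editorial note: a hedged sketch of the renaming support described above,
+      assuming a hypothetical class com.example.NewRecord that used to be called
+      com.example.OldRecord. Files written under the old name stay readable:
+
+        WritableName.addName(NewRecord.class, "com.example.OldRecord");   // legacy alias
+        Class<?> c = WritableName.getClass("com.example.OldRecord", conf); // NewRecord;
+                                                  // throws IOException if unresolvable
+ -->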
+ <!-- start class org.apache.hadoop.io.WritableUtils -->
+ <class name="WritableUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="WritableUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readCompressedByteArray" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skipCompressedByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedByteArray" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="bytes" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="displayByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="record" type="byte[]"/>
+ </method>
+ <method name="clone" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="orig" type="org.apache.hadoop.io.Writable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Make a copy of a writable object using serialization to a buffer.
+ @param orig The object to copy
+ @param conf The configuration used to create the copy
+ @return The copied object]]>
+ </doc>
+ </method>
+ <method name="cloneInto"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.io.Writable"/>
+ <param name="src" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a copy of the writable object using serialization to a buffer.
+ @param dst the object to copy into, which is destroyed
+ @param src the object to copy from
+ @throws IOException]]>
+ </doc>
+ </method>
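+ <!-- Editorial note: a minimal sketch of the two copy utilities above (MyWritable,
+      original, reusable, and conf are assumed names):
+
+        MyWritable copy = (MyWritable) WritableUtils.clone(original, conf);
+        WritableUtils.cloneInto(reusable, original);  // overwrite an existing instance
+ -->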
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an integer to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value
+ (the encoding is identical to {@link #writeVLong(DataOutput, long)}).
+ For other values of i, the first byte value indicates whether the
+ integer is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following integer
+ is positive, and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following integer
+ is negative, and the number of bytes that follow is -(v+120). Bytes are
+ stored in the high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Integer to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ long is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following long
+ is positive, and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following long
+ is negative, and the number of bytes that follow is -(v+120). Bytes are
+ stored in the high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
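+ <!-- Editorial note: a round-trip sketch of the zero-compressed encoding described
+      above (all stream objects are local assumptions):
+
+        java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
+        java.io.DataOutputStream out = new java.io.DataOutputStream(bytes);
+        WritableUtils.writeVLong(out, 300L);
+        // 300 needs two payload bytes, so the first byte is -114 = -(112 + 2),
+        // and the encoded length is 3, matching WritableUtils.getVIntSize(300L).
+        byte[] encoded = bytes.toByteArray();
+        java.io.DataInputStream in = new java.io.DataInputStream(
+            new java.io.ByteArrayInputStream(encoded));
+        long decoded = WritableUtils.readVLong(in);   // == 300
+ -->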
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized long from stream.]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized integer from stream.]]>
+ </doc>
+ </method>
+ <method name="isNegativeVInt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Given the first byte of a vint/vlong, determine the sign.
+ @param value the first byte
+ @return true if the value is negative]]>
+ </doc>
+ </method>
+ <method name="decodeVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Parse the first byte of a vint/vlong to determine the number of bytes.
+ @param value the first byte of the vint/vlong
+ @return the total number of bytes (1 to 9)]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+ <![CDATA[Get the encoded length when an integer is stored in the variable-length format.
+ @param i the value whose encoded length is wanted
+ @return the encoded length in bytes]]>
+ </doc>
+ </method>
+ <method name="readEnum" return="T extends java.lang.Enum&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="enumType" type="java.lang.Class&lt;T&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an Enum value from DataInput. Enums are read and written
+ using their String values.
+ @param <T> Enum type
+ @param in DataInput to read from
+ @param enumType Class type of Enum
+ @return Enum represented by String read from DataInput
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeEnum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="enumVal" type="java.lang.Enum"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the String value of an enum to DataOutput.
+ @param out DataOutput stream
+ @param enumVal enum value
+ @throws IOException]]>
+ </doc>
+ </method>
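+ <!-- Editorial note: enums round-trip through their names as described above
+      (Color is a hypothetical enum; out and in are assumed Data streams):
+
+        enum Color { RED, GREEN }
+        WritableUtils.writeEnum(out, Color.GREEN);          // writes the String "GREEN"
+        Color c = WritableUtils.readEnum(in, Color.class);  // == Color.GREEN
+ -->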
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip <i>len</i> bytes in the input stream <i>in</i>.
+ @param in input stream
+ @param len number of bytes to skip
+ @throws IOException if fewer than <i>len</i> bytes could be skipped]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableUtils -->
+ <doc>
+ <![CDATA[Generic i/o code for use when reading and writing data to the network,
+to databases, and to files.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.io.compress">
+ <!-- start interface org.apache.hadoop.io.compress.CompressionCodec -->
+ <interface name="CompressionCodec" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream}.
+
+ @param out the location for the final output stream
+ @return a stream to which the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream} with the given {@link Compressor}.
+
+ @param out the location for the final output stream
+ @param compressor compressor to use
+ @return a stream to which the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
+
+ @return the type of compressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Compressor} for use by this {@link CompressionCodec}.
+
+ @return a new compressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a stream decompressor that will read from the given input stream.
+
+ @param in the stream to read compressed bytes from
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionInputStream} that will read from the given
+ {@link InputStream} with the given {@link Decompressor}.
+
+ @param in the stream to read compressed bytes from
+ @param decompressor decompressor to use
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
+
+ @return the type of decompressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
+
+ @return a new decompressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface encapsulates a streaming compression/decompression pair.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.CompressionCodec -->
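+ <!-- Editorial note: a hedged sketch of the typical codec round trip defined by this
+      interface, using the GzipCodec declared later in this package (conf, rawOut,
+      rawIn, data and buf are assumptions):
+
+        GzipCodec codec = new GzipCodec();
+        codec.setConf(conf);                      // codecs here are Configurable
+        CompressionOutputStream cOut = codec.createOutputStream(rawOut);
+        cOut.write(data, 0, data.length);
+        cOut.finish();                            // end the compressed stream,
+                                                  // leaving rawOut open
+        CompressionInputStream cIn = codec.createInputStream(rawIn);
+        int n = cIn.read(buf, 0, buf.length);     // yields uncompressed bytes
+ -->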
+ <!-- start class org.apache.hadoop.io.compress.CompressionCodecFactory -->
+ <class name="CompressionCodecFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionCodecFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Find the codecs specified in the config value io.compression.codecs
+ and register them. Defaults to gzip and the DEFLATE-based default codec.]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print the extension map out as a string.]]>
+ </doc>
+ </method>
+ <method name="getCodecClasses" return="java.util.List&lt;java.lang.Class&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the list of codecs listed in the configuration
+ @param conf the configuration to look in
+ @return a list of the codec classes, or null if the attribute
+ was not set]]>
+ </doc>
+ </method>
+ <method name="setCodecClasses"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="classes" type="java.util.List&lt;java.lang.Class&gt;"/>
+ <doc>
+ <![CDATA[Sets a list of codec classes in the configuration.
+ @param conf the configuration to modify
+ @param classes the list of classes to set]]>
+ </doc>
+ </method>
+ <method name="getCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Find the relevant compression codec for the given file based on its
+ filename suffix.
+ @param file the filename to check
+ @return the codec object]]>
+ </doc>
+ </method>
+ <method name="removeSuffix" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes a suffix from a filename, if it has it.
+ @param filename the filename to strip
+ @param suffix the suffix to remove
+ @return the shortened filename]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[A little test program.
+ @param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A factory that will find the correct codec for a given filename.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionCodecFactory -->
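+ <!-- Editorial note: a minimal sketch of suffix-based codec lookup with this factory
+      (conf and the FileSystem handle fs are assumptions):
+
+        CompressionCodecFactory factory = new CompressionCodecFactory(conf);
+        Path p = new Path("/logs/events.gz");           // hypothetical path
+        CompressionCodec codec = factory.getCodec(p);   // GzipCodec, by ".gz" suffix
+        if (codec != null) {
+          java.io.InputStream in = codec.createInputStream(fs.open(p));
+        }
+ -->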
+ <!-- start class org.apache.hadoop.io.compress.CompressionInputStream -->
+ <class name="CompressionInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a compression input stream that reads compressed
+ bytes from the given stream and returns them decompressed.
+
+ @param in the input stream to read compressed bytes from.]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read bytes from the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the decompressor to its initial state and discard any buffered data,
+ as the underlying stream may have been repositioned.]]>
+ </doc>
+ </method>
+ <field name="in" type="java.io.InputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The underlying input stream, from which compressed bytes are read.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression input stream.
+
+ <p>Implementations are assumed to be buffered. This permits clients to
+ reposition the underlying input stream then call {@link #resetState()},
+ without having to also synchronize client buffers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <class name="CompressionOutputStream" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a compression output stream that writes
+ the compressed bytes to the given stream.
+ @param out the output stream to write the compressed bytes to]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finishes writing compressed data to the output stream
+ without closing the underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the compression to the initial state.
+ Does not reset the underlying stream.]]>
+ </doc>
+ </method>
+ <field name="out" type="java.io.OutputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The underlying output stream, to which compressed bytes are written.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression output stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <!-- start interface org.apache.hadoop.io.compress.Compressor -->
+ <interface name="Compressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for compression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets preset dictionary for compression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of uncompressed bytes input so far.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of compressed bytes output so far.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[When called, indicates that compression should end
+ with the current contents of the input buffer.]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the compressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with compressed data. Returns actual number
+ of bytes of compressed data. A return value of 0 indicates that
+ needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the compressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of compressed data.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets compressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the compressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'compressor' which can be
+ plugged into a {@link CompressionOutputStream} to compress data.
+ This is modelled after {@link java.util.zip.Deflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Compressor -->
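+ <!-- Editorial note: the setInput/finish/finished/compress protocol above, shown as
+      a single-block sketch (compressor, data and rawOut are assumptions):
+
+        compressor.setInput(data, 0, data.length);
+        compressor.finish();                          // no further input will follow
+        byte[] buf = new byte[64 * 1024];
+        while (!compressor.finished()) {
+          int n = compressor.compress(buf, 0, buf.length);
+          rawOut.write(buf, 0, n);                    // drain compressed output
+        }
+ -->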
+ <!-- start interface org.apache.hadoop.io.compress.Decompressor -->
+ <interface name="Decompressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for decompression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets preset dictionary for decompression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns <code>true</code> if a preset dictionary is needed for decompression.
+ @return <code>true</code> if a preset dictionary is needed for decompression]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the decompressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the decompressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with uncompressed data. Returns actual number
+ of bytes of uncompressed data. A return value of 0 indicates that
+ #needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the uncompressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of uncompressed data.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets decompressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the decompressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'de-compressor' which can be
+ plugged into a {@link CompressionInputStream} to decompress data.
+ This is modelled after {@link java.util.zip.Inflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Decompressor -->
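+ <!-- Editorial note: the mirror-image protocol for this interface, again as a hedged
+      single-block sketch (decompressor, compressed and sink are assumptions):
+
+        decompressor.setInput(compressed, 0, compressed.length);
+        byte[] buf = new byte[64 * 1024];
+        while (!decompressor.finished()) {
+          int n = decompressor.decompress(buf, 0, buf.length);
+          if (n == 0 && decompressor.needsInput()) {
+            break;                                    // whole input block consumed
+          }
+          sink.write(buf, 0, n);                      // drain uncompressed output
+        }
+ -->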
+ <!-- start class org.apache.hadoop.io.compress.DefaultCodec -->
+ <class name="DefaultCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="DefaultCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.DefaultCodec -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec -->
+ <class name="GzipCodec" extends="org.apache.hadoop.io.compress.DefaultCodec"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class creates gzip compressors/decompressors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <class name="GzipCodec.GzipInputStream" extends="org.apache.hadoop.io.compress.DecompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipInputStream" type="org.apache.hadoop.io.compress.DecompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow subclasses to directly set the inflater stream.]]>
+ </doc>
+ </constructor>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <class name="GzipCodec.GzipOutputStream" extends="org.apache.hadoop.io.compress.CompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipOutputStream" type="org.apache.hadoop.io.compress.CompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow subclasses to directly set the deflater stream.
+ @param out the deflater stream to use]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A bridge that wraps around a DeflaterOutputStream to make it
+ a CompressionOutputStream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <!-- start class org.apache.hadoop.io.compress.LzoCodec -->
+ <class name="LzoCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="LzoCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if native-lzo library is loaded & initialized.
+
+ @param conf configuration
+ @return <code>true</code> if native-lzo library is loaded & initialized;
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link org.apache.hadoop.io.compress.CompressionCodec} for a streaming
+ <b>lzo</b> compression/decompression pair.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzoCodec -->
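+ <!-- Editorial note: unlike the gzip/DEFLATE codecs, lzo depends on a native library,
+      so the availability check documented above should gate its use (conf is an
+      assumed Configuration):
+
+        if (LzoCodec.isNativeLzoLoaded(conf)) {
+          LzoCodec lzo = new LzoCodec();
+          lzo.setConf(conf);
+          // use lzo like any other CompressionCodec
+        }
+ -->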
+</package>
+<package name="org.apache.hadoop.io.compress.lzo">
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
+ <class name="LzoCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="LzoCompressor" type="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified {@link CompressionStrategy}.
+
+ @param strategy lzo compression algorithm to use
+ @param directBufferSize size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default lzo1x_1 compression.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo compressors are loaded and initialized.
+
+ @return <code>true</code> if lzo compressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of bytes given to this compressor since last reset.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of compressed bytes delivered to callers of compress since the last reset.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Noop.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
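+  <!-- A minimal usage sketch of the Compressor contract implemented above,
+       assuming the native lzo library is loaded (isNativeLzoLoaded() returns
+       true); "data" and "out" are a hypothetical input byte[] and OutputStream:
+
+         LzoCompressor compressor = new LzoCompressor();   // default lzo1x_1
+         compressor.setInput(data, 0, data.length);
+         compressor.finish();                              // no more input follows
+         byte[] buf = new byte[64 * 1024];
+         while (!compressor.finished()) {
+           int n = compressor.compress(buf, 0, buf.length);
+           out.write(buf, 0, n);                           // drain compressed bytes
+         }
+         compressor.end();
+  -->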
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <class name="LzoCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+      <![CDATA[The compression algorithm for the lzo library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
+ <class name="LzoDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="LzoDecompressor" type="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.
+
+ @param strategy lzo decompression algorithm
+ @param directBufferSize size of the direct-buffer]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo decompressors are loaded and initialized.
+
+ @return <code>true</code> if lzo decompressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+ <class name="LzoDecompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+</package>
+<package name="org.apache.hadoop.io.compress.zlib">
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <class name="BuiltInZlibDeflater" extends="java.util.zip.Deflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="BuiltInZlibDeflater" type="int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[A wrapper around java.util.zip.Deflater to make it conform
+ to the org.apache.hadoop.io.compress.Compressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <class name="BuiltInZlibInflater" extends="java.util.zip.Inflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="BuiltInZlibInflater" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibInflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[A wrapper around java.util.zip.Inflater to make it conform
+ to the org.apache.hadoop.io.compress.Decompressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
+ <class name="ZlibCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="ZlibCompressor" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified compression level.
+ Compressed data will be generated in ZLIB format.
+
+ @param level Compression level #CompressionLevel
+ @param strategy Compression strategy #CompressionStrategy
+ @param header Compression header #CompressionHeader
+ @param directBufferSize Size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default compression level.
+ Compressed data will be generated in ZLIB format.]]>
+ </doc>
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
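+  <!-- A sketch of constructing the compressor above with explicit settings; the
+       enum constants shown (BEST_COMPRESSION, DEFAULT_STRATEGY, GZIP_FORMAT) are
+       assumed members of the nested enums documented next:
+
+         Compressor compressor = new ZlibCompressor(
+             ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
+             ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY,
+             ZlibCompressor.CompressionHeader.GZIP_FORMAT,  // gzip framing
+             64 * 1024);                                    // direct buffer size
+
+       The no-argument constructor instead uses the default compression level and
+       plain ZLIB format.
+  -->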
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <class name="ZlibCompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The type of header for compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <class name="ZlibCompressor.CompressionLevel" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+      <![CDATA[The compression level for the zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <class name="ZlibCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+      <![CDATA[The compression strategy for the zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <class name="ZlibDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="ZlibDecompressor" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new decompressor.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
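+  <!-- A minimal sketch of the Decompressor contract implemented above;
+       "compressed" and "out" are a hypothetical input byte[] and OutputStream:
+
+         ZlibDecompressor decompressor = new ZlibDecompressor();
+         decompressor.setInput(compressed, 0, compressed.length);
+         byte[] buf = new byte[64 * 1024];
+         while (!decompressor.finished() && !decompressor.needsInput()) {
+           int n = decompressor.decompress(buf, 0, buf.length);
+           out.write(buf, 0, n);
+         }
+         decompressor.end();
+  -->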
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <class name="ZlibDecompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The headers to detect from compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
+ <class name="ZlibFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ZlibFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeZlibLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Check if native-zlib code is loaded & initialized correctly and
+ can be used for this job.
+
+ @param conf configuration
+ @return <code>true</code> if native-zlib is loaded & initialized
+         and can be used for this job, else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of factories to create the right
+ zlib/gzip compressor/decompressor instances.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
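+  <!-- A sketch of the factory above: given a Configuration it returns the native
+       zlib codec when the native library is loaded and, by assumption from the
+       wrapper classes in this package, the java.util.zip-backed
+       BuiltInZlibDeflater/BuiltInZlibInflater otherwise:
+
+         Configuration conf = new Configuration();
+         boolean nativeZlib = ZlibFactory.isNativeZlibLoaded(conf);
+         Compressor compressor = ZlibFactory.getZlibCompressor(conf);
+         Decompressor decompressor = ZlibFactory.getZlibDecompressor(conf);
+  -->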
+</package>
+<package name="org.apache.hadoop.io.retry">
+ <!-- start class org.apache.hadoop.io.retry.RetryPolicies -->
+ <class name="RetryPolicies" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryPolicies"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="retryUpToMaximumCountWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumTimeWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxTime" type="long"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying for a maximum time, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumCountWithProportionalSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by the number of tries so far.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="exponentialBackoffRetry" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by a random
+ number in the range [0, 2^(number of retries so far)).
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByRemoteException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ A retry policy for RemoteException: set a default policy with some
+ explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <field name="TRY_ONCE_THEN_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail by re-throwing the exception.
+ This corresponds to having no retry mechanism in place.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="TRY_ONCE_DONT_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail silently for <code>void</code> methods, or by
+ re-throwing the exception for non-<code>void</code> methods.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="RETRY_FOREVER" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Keep trying forever.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A collection of useful implementations of {@link RetryPolicy}.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryPolicies -->
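+  <!-- A sketch composing the policies above: retry IOExceptions a limited number
+       of times with a fixed sleep, and fail immediately on any other exception:
+
+         Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
+             new HashMap<Class<? extends Exception>, RetryPolicy>();
+         exceptionToPolicyMap.put(IOException.class,
+             RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10, TimeUnit.SECONDS));
+         RetryPolicy policy = RetryPolicies.retryByException(
+             RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
+  -->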
+ <!-- start interface org.apache.hadoop.io.retry.RetryPolicy -->
+ <interface name="RetryPolicy" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="shouldRetry" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Exception"/>
+ <param name="retries" type="int"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[<p>
+ Determines whether the framework should retry a
+ method for the given exception, and the number
+ of retries that have been made for that operation
+ so far.
+ </p>
+ @param e The exception that caused the method to fail.
+ @param retries The number of times the method has been retried.
+ @return <code>true</code> if the method should be retried,
+ <code>false</code> if the method should not be retried
+ but shouldn't fail with an exception (only for void methods).
+ @throws Exception The re-thrown exception <code>e</code> indicating
+ that the method failed and should not be retried further.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Specifies a policy for retrying method failures.
+ Implementations of this interface should be immutable.
+ </p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.retry.RetryPolicy -->
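+  <!-- A sketch of a custom (hypothetical) implementation of the interface above:
+       retry at most twice, then fail by re-throwing the exception:
+
+         class RetryTwice implements RetryPolicy {
+           public boolean shouldRetry(Exception e, int retries) throws Exception {
+             if (retries < 2) {
+               return true;  // ask the framework to retry the method
+             }
+             throw e;        // give up: re-throw the original exception
+           }
+         }
+  -->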
+ <!-- start class org.apache.hadoop.io.retry.RetryProxy -->
+ <class name="RetryProxy" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryProxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using the same retry policy for each method in the interface.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param retryPolicy the policy for retrying method call failures
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="methodNameToPolicyMap" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using a set of retry policies specified by method name.
+ If no retry policy is defined for a method then a default of
+ {@link RetryPolicies#TRY_ONCE_THEN_FAIL} is used.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param methodNameToPolicyMap a map of method names to retry policies
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for creating retry proxies.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryProxy -->
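+  <!-- A sketch of the per-method form of create, reusing the hypothetical
+       UnreliableInterface/unreliableImpl from the package overview below; only
+       call() retries forever, all other methods default to TRY_ONCE_THEN_FAIL:
+
+         Map<String, RetryPolicy> methodNameToPolicyMap =
+             new HashMap<String, RetryPolicy>();
+         methodNameToPolicyMap.put("call", RetryPolicies.RETRY_FOREVER);
+         UnreliableInterface unreliable = (UnreliableInterface)
+             RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+                 methodNameToPolicyMap);
+  -->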
+ <doc>
+ <![CDATA[<p>
+A mechanism for selectively retrying methods that throw exceptions under certain circumstances.
+</p>
+
+<p>
+Typical usage is
+</p>
+
+<pre>
+UnreliableImplementation unreliableImpl = new UnreliableImplementation();
+UnreliableInterface unreliable = (UnreliableInterface)
+ RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+ RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10, TimeUnit.SECONDS));
+unreliable.call();
+</pre>
+
+<p>
+This will retry any method called on <code>unreliable</code> (in this case the <code>call()</code>
+method) up to four times, sleeping 10 seconds between
+each retry. There are a number of {@link org.apache.hadoop.io.retry.RetryPolicies retry policies}
+available, or you can implement a custom one by implementing {@link org.apache.hadoop.io.retry.RetryPolicy}.
+It is also possible to specify retry policies on a
+{@link org.apache.hadoop.io.retry.RetryProxy#create(Class, Object, Map) per-method basis}.
+</p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.io.serializer">
+ <!-- start interface org.apache.hadoop.io.serializer.Deserializer -->
+ <interface name="Deserializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the deserializer for reading.</p>]]>
+ </doc>
+ </method>
+ <method name="deserialize" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ Deserialize the next object from the underlying input stream.
+ If the object <code>t</code> is non-null then this deserializer
+ <i>may</i> set its internal state to the next object read from the input
+ stream. Otherwise, if the object <code>t</code> is null a new
+ deserialized object will be created.
+ </p>
+ @return the deserialized object]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying input stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for deserializing objects of type <T> from an
+ {@link InputStream}.
+ </p>
+
+ <p>
+ Deserializers are stateful, but must not buffer the input since
+ other consumers may read from the input between calls to
+ {@link #deserialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Deserializer -->
+ <!-- start class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <class name="DeserializerComparator" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator&lt;T&gt;"/>
+ <constructor name="DeserializerComparator" type="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link Deserializer} to deserialize
+ the objects to be compared so that the standard {@link Comparator} can
+ be used to compare them.
+ </p>
+ <p>
+ One may optimize compare-intensive operations by using a custom
+ implementation of {@link RawComparator} that operates directly
+ on byte representations.
+ </p>
+ @param <T>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <class name="JavaSerialization" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;java.io.Serializable&gt;"/>
+ <constructor name="JavaSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ An experimental {@link Serialization} for Java {@link Serializable} classes.
+ </p>
+ @see JavaSerializationComparator]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <class name="JavaSerializationComparator" extends="org.apache.hadoop.io.serializer.DeserializerComparator&lt;T&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JavaSerializationComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o1" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ <param name="o2" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link JavaSerialization}
+ {@link Deserializer} to deserialize objects that are then compared via
+ their {@link Comparable} interfaces.
+ </p>
+ @param <T>
+ @see JavaSerialization]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <!-- start interface org.apache.hadoop.io.serializer.Serialization -->
+ <interface name="Serialization" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Allows clients to test whether this {@link Serialization}
+ supports the given class.]]>
+ </doc>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Serializer} for the given class.]]>
+ </doc>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Deserializer} for the given class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Encapsulates a {@link Serializer}/{@link Deserializer} pair.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serialization -->
+ <!-- start class org.apache.hadoop.io.serializer.SerializationFactory -->
+ <class name="SerializationFactory" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SerializationFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Serializations are found by reading the <code>io.serializations</code>
+ property from <code>conf</code>, which is a comma-delimited list of
+ classnames.
+ </p>]]>
+ </doc>
+ </constructor>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getSerialization" return="org.apache.hadoop.io.serializer.Serialization&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for {@link Serialization}s.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.SerializationFactory -->
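+  <!-- A round-trip sketch of the factory above, assuming the default
+       "io.serializations" value includes WritableSerialization so that Text
+       (a Writable) is accepted:
+
+         Configuration conf = new Configuration();
+         SerializationFactory factory = new SerializationFactory(conf);
+
+         Serializer<Text> serializer = factory.getSerializer(Text.class);
+         ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+         serializer.open(bytes);
+         serializer.serialize(new Text("hello"));
+         serializer.close();
+
+         Deserializer<Text> deserializer = factory.getDeserializer(Text.class);
+         deserializer.open(new ByteArrayInputStream(bytes.toByteArray()));
+         Text copy = deserializer.deserialize(null);  // null: allocate a new object
+         deserializer.close();
+  -->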
+ <!-- start interface org.apache.hadoop.io.serializer.Serializer -->
+ <interface name="Serializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the serializer for writing.</p>]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Serialize <code>t</code> to the underlying output stream.</p>]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying output stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for serializing objects of type <T> to an
+ {@link OutputStream}.
+ </p>
+
+ <p>
+ Serializers are stateful, but must not buffer the output since
+ other producers may write to the output between calls to
+ {@link #serialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serializer -->
+ <!-- start class org.apache.hadoop.io.serializer.WritableSerialization -->
+ <class name="WritableSerialization" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="WritableSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Serialization} for {@link Writable}s that delegates to
+ {@link Writable#write(java.io.DataOutput)} and
+ {@link Writable#readFields(java.io.DataInput)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.WritableSerialization -->
+ <doc>
+ <![CDATA[<p>
+This package provides a mechanism for using different serialization frameworks
+in Hadoop. The property "io.serializations" defines a list of
+{@link org.apache.hadoop.io.serializer.Serialization}s that know how to create
+{@link org.apache.hadoop.io.serializer.Serializer}s and
+{@link org.apache.hadoop.io.serializer.Deserializer}s.
+</p>
+
+<p>
+To add a new serialization framework, write an implementation of
+{@link org.apache.hadoop.io.serializer.Serialization} and add its name to the
+"io.serializations" property.
+</p>]]>
+ </doc>
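+  <!-- A configuration sketch for registering a framework as described above;
+       com.example.MySerialization is a hypothetical implementation:
+
+         Configuration conf = new Configuration();
+         conf.set("io.serializations",
+             "org.apache.hadoop.io.serializer.WritableSerialization,"
+             + "com.example.MySerialization");
+  -->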
+</package>
+<package name="org.apache.hadoop.ipc">
+ <!-- start class org.apache.hadoop.ipc.Client -->
+ <class name="Client" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Client" type="java.lang.Class, org.apache.hadoop.conf.Configuration, javax.net.SocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client whose values are of the given {@link Writable}
+ class.]]>
+ </doc>
+ </constructor>
+ <constructor name="Client" type="java.lang.Class&lt;?&gt;, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Construct an IPC client with the default SocketFactory.
+ @param valueClass
+ @param conf]]>
+ </doc>
+ </constructor>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all threads related to this client. No further calls may be made
+ using this client.]]>
+ </doc>
+ </method>
+ <method name="setTimeout"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="timeout" type="int"/>
+ <doc>
+      <![CDATA[Sets the timeout used for network I/O.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a call, passing <code>param</code>, to the IPC server running at
+ <code>address</code>, returning the value. Throws exceptions if there are
+ network problems or if the remote code threw an exception.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="params" type="org.apache.hadoop.io.Writable[]"/>
+ <param name="addresses" type="java.net.InetSocketAddress[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Makes a set of calls in parallel. Each parameter is sent to the
+ corresponding address. When all values are available, or have timed out
+ or errored, the collected results are returned in an array. The array
+ contains nulls for calls that timed out or errored.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A client for an IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Server]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Client -->
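+  <!-- A sketch of a raw IPC call with the Client above; the server address and
+       the use of LongWritable as both parameter and value class are assumptions
+       about the server being contacted (call may throw IOException or
+       InterruptedException):
+
+         Configuration conf = new Configuration();
+         Client client = new Client(LongWritable.class, conf);
+         InetSocketAddress addr = new InetSocketAddress("localhost", 9000);
+         Writable result = client.call(new LongWritable(42), addr);
+         client.stop();  // no further calls may be made afterwards
+  -->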
+ <!-- start class org.apache.hadoop.ipc.RemoteException -->
+ <class name="RemoteException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RemoteException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lookupTypes" type="java.lang.Class[]"/>
+ <doc>
+      <![CDATA[If this remote exception wraps one of the lookupTypes,
+ then instantiate and return that exception.
+ <p>
+ Unwraps any IOException.
+
+ @param lookupTypes the desired exception classes.
+ @return IOException, which is either the lookupClass exception or this.]]>
+ </doc>
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Instantiate and return the exception wrapped up by this remote exception.
+
+ <p> This unwraps any <code>Throwable</code> that has a constructor taking
+ a <code>String</code> as a parameter.
+ Otherwise it returns this.
+
+ @return <code>Throwable</code>]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RemoteException -->
+ <!-- start class org.apache.hadoop.ipc.RPC -->
+ <class name="RPC" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="waitForProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Construct a client-side proxy object with the default SocketFactory.
+
+ @param protocol
+ @param clientVersion
+ @param addr
+ @param conf
+ @return a proxy instance
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopProxy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="proxy" type="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <doc>
+      <![CDATA[Stop this proxy and release its invoker's resources.
+ @param proxy the proxy to be stopped]]>
+ </doc>
+ </method>
+ <method name="call" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="method" type="java.lang.reflect.Method"/>
+ <param name="params" type="java.lang.Object[][]"/>
+ <param name="addrs" type="java.net.InetSocketAddress[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Expert: Make multiple, parallel calls to a set of servers.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="numHandlers" type="int"/>
+ <param name="verbose" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple RPC mechanism.
+
+ A <i>protocol</i> is a Java interface. All parameters and return types must
+ be one of:
+
+ <ul> <li>a primitive type, <code>boolean</code>, <code>byte</code>,
+ <code>char</code>, <code>short</code>, <code>int</code>, <code>long</code>,
+ <code>float</code>, <code>double</code>, or <code>void</code>; or</li>
+
+ <li>a {@link String}; or</li>
+
+ <li>a {@link Writable}; or</li>
+
+ <li>an array of the above types</li> </ul>
+
+ All methods in the protocol should throw only IOException. No field data of
+ the protocol instance is transmitted.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC -->
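+ <!-- Editor's sketch (not part of the generated API description): a minimal,
+      hypothetical end-to-end use of the RPC class above. EchoProtocol, EchoImpl
+      and the host/port are illustrative names; the calls are the
+      getServer/getProxy/stopProxy signatures documented in this section.
+
+      import java.io.IOException;
+      import java.net.InetSocketAddress;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.ipc.RPC;
+      import org.apache.hadoop.ipc.VersionedProtocol;
+
+      // A protocol is a plain Java interface; by convention it declares versionID.
+      interface EchoProtocol extends VersionedProtocol {
+        long versionID = 1L;
+        String echo(String msg) throws IOException;
+      }
+
+      class EchoImpl implements EchoProtocol {
+        public String echo(String msg) { return msg; }
+        public long getProtocolVersion(String protocol, long clientVersion) {
+          return versionID;
+        }
+      }
+
+      class EchoDemo {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          RPC.Server server = RPC.getServer(new EchoImpl(), "localhost", 9000, conf);
+          server.start();
+          EchoProtocol proxy = (EchoProtocol) RPC.getProxy(
+              EchoProtocol.class, EchoProtocol.versionID,
+              new InetSocketAddress("localhost", 9000), conf);
+          System.out.println(proxy.echo("hello"));  // a remote call over Hadoop IPC
+          RPC.stopProxy(proxy);                     // release the invoker's resources
+          server.stop();
+        }
+      }
+ -->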
+ <!-- start class org.apache.hadoop.ipc.RPC.Server -->
+ <class name="RPC.Server" extends="org.apache.hadoop.ipc.Server"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on]]>
+ </doc>
+ </constructor>
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on
+ @param numHandlers the number of method handler threads to run
+ @param verbose whether each call should be logged]]>
+ </doc>
+ </constructor>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receivedTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An RPC Server.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.Server -->
+ <!-- start class org.apache.hadoop.ipc.RPC.VersionMismatch -->
+ <class name="RPC.VersionMismatch" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.VersionMismatch" type="java.lang.String, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a version mismatch exception
+ @param interfaceName the name of the mismatched protocol
+ @param clientVersion the client's version of the protocol
+ @param serverVersion the server's version of the protocol]]>
+ </doc>
+ </constructor>
+ <method name="getInterfaceName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the interface name
+ @return the java class name
+ (eg. org.apache.hadoop.mapred.InterTrackerProtocol)]]>
+ </doc>
+ </method>
+ <method name="getClientVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the client's preferred version]]>
+ </doc>
+ </method>
+ <method name="getServerVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the server's agreed-to version.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A version mismatch for the RPC protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.VersionMismatch -->
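+ <!-- Editor's sketch: reacting to a protocol version skew. RPC.VersionMismatch
+      extends IOException, so a plausible place to catch it is around getProxy,
+      which checks versions via getProtocolVersion. EchoProtocol is the
+      hypothetical interface from the sketch above; addr/conf assumed in scope:
+
+      EchoProtocol proxy;
+      try {
+        proxy = (EchoProtocol) RPC.getProxy(EchoProtocol.class,
+            EchoProtocol.versionID, addr, conf);
+      } catch (RPC.VersionMismatch e) {
+        System.err.println("server speaks version " + e.getServerVersion()
+            + " of " + e.getInterfaceName()
+            + ", client expected " + e.getClientVersion());
+        throw e;
+      }
+ -->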
+ <!-- start class org.apache.hadoop.ipc.Server -->
+ <class name="Server" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class, int, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class, int, org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a server listening on the named port and address. Parameters passed must
+ be of the named class. The <code>handlerCount</code> determines
+ the number of handler threads that will be used to process calls.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.ipc.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the server instance called under or null. May be called under
+ {@link #call(Writable, long)} implementations, and under {@link Writable}
+ methods of parameters and return values. Permits applications to access
+ the server context.]]>
+ </doc>
+ </method>
+ <method name="getRemoteIp" return="java.net.InetAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the remote side's IP address when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="getRemoteAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns remote address as a string when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="bind"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.ServerSocket"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <param name="backlog" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A convenience method to bind to a given address and report
+ better exceptions if the address is not a valid host.
+ @param socket the socket to bind
+ @param address the address to bind to
+ @param backlog the number of connections allowed in the queue
+ @throws BindException if the address can't be bound
+ @throws UnknownHostException if the address isn't a valid host name
+ @throws IOException other random errors from bind]]>
+ </doc>
+ </method>
+ <method name="setTimeout"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="timeout" type="int"/>
+ <doc>
+ <![CDATA[Sets the timeout used for network i/o.]]>
+ </doc>
+ </method>
+ <method name="setSocketSendBufSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Sets the socket buffer size used for responding to RPCs]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts the service. Must be called before any calls will be handled.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops the service. No new calls will be handled after this is called.]]>
+ </doc>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Wait for the server to be stopped.
+ Does not wait for all subthreads to finish.
+ See {@link #stop()}.]]>
+ </doc>
+ </method>
+ <method name="getListenerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the socket (ip+port) on which the RPC server is listening.
+ @return the socket (ip+port) on which the RPC server is listening.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receiveTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called for each call.]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of open RPC connections
+ @return the number of open rpc connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <field name="HEADER" type="java.nio.ByteBuffer"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The first four bytes of Hadoop RPC connections]]>
+ </doc>
+ </field>
+ <field name="CURRENT_VERSION" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rpcMetrics" type="org.apache.hadoop.ipc.metrics.RpcMetrics"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Client]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Server -->
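+ <!-- Editor's sketch: the smallest Server subclass, using the protected
+      (bindAddress, port, paramClass, handlerCount, conf) constructor documented
+      above. EchoServer and the port are hypothetical; Text is used as the
+      parameter class since IPC parameters must be Writable.
+
+      import java.io.IOException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.io.Writable;
+      import org.apache.hadoop.ipc.Server;
+
+      class EchoServer extends Server {
+        EchoServer(Configuration conf) throws IOException {
+          super("localhost", 9000, Text.class, 2, conf);  // 2 handler threads
+        }
+        public Writable call(Writable param, long receiveTime) throws IOException {
+          // Server.get() / Server.getRemoteAddress() may be used here, inside a call.
+          LOG.info("call from " + Server.getRemoteAddress());
+          return param;  // echo the parameter back as the return value
+        }
+      }
+
+      // Lifecycle: start() before any calls are handled; stop(), then join().
+      // Server s = new EchoServer(conf); s.start(); ... s.stop(); s.join();
+ -->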
+ <!-- start interface org.apache.hadoop.ipc.VersionedProtocol -->
+ <interface name="VersionedProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return protocol version corresponding to protocol interface.
+ @param protocol The classname of the protocol interface
+ @param clientVersion The version of the protocol that the client speaks
+ @return the version that the server will speak]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Superclass of all protocols that use Hadoop RPC.
+ Subclasses of this interface are also supposed to have
+ a static final long versionID field.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.VersionedProtocol -->
+ <doc>
+ <![CDATA[Tools to help define network clients and servers.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.ipc.metrics">
+ <!-- start class org.apache.hadoop.ipc.metrics.RpcMetrics -->
+ <class name="RpcMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="RpcMetrics" type="java.lang.String, java.lang.String, org.apache.hadoop.ipc.Server"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Push the metrics to the monitoring subsystem on each doUpdates() call.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="rpcQueueTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The metrics variables are public:
+ - they can be set directly by calling their set/inc methods
+ - they can also be read directly - e.g. JMX does this.]]>
+ </doc>
+ </field>
+ <field name="rpcProcessingTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rpcDiscardedOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="metricsList" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.metrics.util.MetricsTimeVaryingRate&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various RPC statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #rpcDiscardedOps}.inc(time)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.metrics.RpcMetrics -->
+ <!-- start interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+ <interface name="RpcMgtMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRpcOpsNumber" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of RPC Operations in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for RPC Operations in the last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Average RPC Operation Queued Time in the last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsDiscardedOpsNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Discarded RPC operations due to timeout in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsDiscardedOpsQtime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average Queued time for Discarded RPC Operations in the last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all min max times]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of open RPC connections
+ @return the number of open rpc connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for the RPC layer.
+ Many of the statistics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the statistics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most do.
+ The default Null metrics context however does NOT. So if you aren't
+ using any other metrics context then you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ rpc.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+</package>
+<package name="org.apache.hadoop.log">
+ <!-- start class org.apache.hadoop.log.LogLevel -->
+ <class name="LogLevel" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[A command line implementation]]>
+ </doc>
+ </method>
+ <field name="USAGES" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Change log level in runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel -->
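+ <!-- Editor's sketch: LogLevel.main takes the same arguments as the command-line
+      tool. The -getlevel/-setlevel flags, host:port and logger name below are
+      assumptions for illustration, not taken from this file:
+
+      // Raise org.apache.hadoop.ipc logging to DEBUG on a daemon's HTTP port.
+      org.apache.hadoop.log.LogLevel.main(new String[] {
+          "-setlevel", "namenode.example.com:50070",
+          "org.apache.hadoop.ipc", "DEBUG" });
+ -->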
+ <!-- start class org.apache.hadoop.log.LogLevel.Servlet -->
+ <class name="LogLevel.Servlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel.Servlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A servlet implementation]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel.Servlet -->
+</package>
+<package name="org.apache.hadoop.mapred">
+ <!-- start class org.apache.hadoop.mapred.ClusterStatus -->
+ <class name="ClusterStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of task trackers in the cluster.
+
+ @return the number of task trackers in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running map tasks in the cluster.
+
+ @return the number of currently running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running reduce tasks in the cluster.
+
+ @return the number of currently running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running map tasks in the cluster.
+
+ @return the maximum capacity for running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running reduce tasks in the cluster.
+
+ @return the maximum capacity for running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getJobTrackerState" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current state of the <code>JobTracker</code>,
+ as {@link JobTracker.State}
+
+ @return the current state of the <code>JobTracker</code>.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Status information on the current state of the Map-Reduce cluster.
+
+ <p><code>ClusterStatus</code> provides clients with information such as:
+ <ol>
+ <li>
+ Size of the cluster.
+ </li>
+ <li>
+ Task capacity of the cluster.
+ </li>
+ <li>
+ The number of currently running map & reduce tasks.
+ </li>
+ <li>
+ State of the <code>JobTracker</code>.
+ </li>
+ </ol></p>
+
+ <p>Clients can query for the latest <code>ClusterStatus</code>, via
+ {@link JobClient#getClusterStatus()}.</p>
+
+ @see JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ClusterStatus -->
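+ <!-- Editor's sketch: querying the latest ClusterStatus through
+      JobClient#getClusterStatus(), as the class doc above suggests. The JobConf
+      is assumed to point at a running JobTracker:
+
+      import org.apache.hadoop.mapred.ClusterStatus;
+      import org.apache.hadoop.mapred.JobClient;
+      import org.apache.hadoop.mapred.JobConf;
+
+      public class ClusterInfo {
+        public static void main(String[] args) throws Exception {
+          JobClient client = new JobClient(new JobConf());
+          ClusterStatus s = client.getClusterStatus();
+          System.out.println(s.getTaskTrackers() + " trackers; map "
+              + s.getMapTasks() + "/" + s.getMaxMapTasks() + "; reduce "
+              + s.getReduceTasks() + "/" + s.getMaxReduceTasks()
+              + "; state " + s.getJobTrackerState());
+        }
+      }
+ -->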
+ <!-- start class org.apache.hadoop.mapred.CompletedJobStatusStore -->
+ <class name="CompletedJobStatusStore" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <method name="isActive" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Indicates whether job status persistence is active.
+
+ @return TRUE if active, FALSE otherwise.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="store"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobInProgress"/>
+ <doc>
+ <![CDATA[Persists a job in DFS.
+
+ @param job the job about to be 'retired']]>
+ </doc>
+ </method>
+ <method name="readJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This method retrieves JobStatus information from DFS, stored using the
+ store method.
+
+ @param jobId the jobId for which jobStatus is queried
+ @return JobStatus object, null if not able to retrieve]]>
+ </doc>
+ </method>
+ <method name="readJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This method retrieves JobProfile information from DFS, stored using the
+ store method.
+
+ @param jobId the jobId for which jobProfile is queried
+ @return JobProfile object, null if not able to retrieve]]>
+ </doc>
+ </method>
+ <method name="readCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This method retrieves Counters information from DFS, stored using the
+ store method.
+
+ @param jobId the jobId for which Counters is queried
+ @return Counters object, null if not able to retrieve]]>
+ </doc>
+ </method>
+ <method name="readJobTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxEvents" type="int"/>
+ <doc>
+ <![CDATA[This method retrieves TaskCompletionEvents information from DFS, stored
+ using the store method.
+
+ @param jobId the jobId for which TaskCompletionEvents is queried
+ @param fromEventId events offset
+ @param maxEvents max number of events
+ @return TaskCompletionEvent[], empty array if not able to retrieve]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Persists a job's info into DFS and retrieves it from there.
+ <p/>
+ If the retain time is zero, jobs are not persisted.
+ <p/>
+ A daemon thread cleans up job info files older than the retain time.
+ <p/>
+ The retain time can be set with the 'persist.jobstatus.hours'
+ configuration variable (it is in hours).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.CompletedJobStatusStore -->
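+ <!-- Editor's note: the store is driven by the JobTracker rather than created
+      directly by applications. A sketch of enabling persistence, using the
+      'persist.jobstatus.hours' variable named in the class doc above (the exact
+      key is taken from that doc and may differ between releases):
+
+      JobConf conf = new JobConf();
+      conf.set("persist.jobstatus.hours", "24");  // retain job info for a day
+ -->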
+ <!-- start class org.apache.hadoop.mapred.Counters -->
+ <class name="Counters" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Group&gt;"/>
+ <constructor name="Counters"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getGroupNames" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all counter classes.
+ @return Set of counter names.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Group&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getGroup" return="org.apache.hadoop.mapred.Counters.Group"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="groupName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the named counter group, or an empty group if there is none
+ with the specified name.]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Find the counter for the given enum. The same enum will always return the
+ same counter.
+ @param key the counter key
+ @return the matching counter object]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Find a counter by using strings
+ @param group the name of the group
+ @param id the id of the counter within the group (0 to N-1)
+ @param name the internal name of the counter
+ @return the counter for that name]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param key identifies a counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Returns current value of the specified counter, or 0 if the counter
+ does not exist.]]>
+ </doc>
+ </method>
+ <method name="incrAllCounters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+ </doc>
+ </method>
+ <method name="sum" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.mapred.Counters"/>
+ <param name="b" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Convenience method for computing the sum of two sets of counters.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of counters, by summing the number of counters
+ in each group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the set of groups.
+ The external format is:
+ #groups (groupName group)*
+
+ i.e. the number of groups followed by 0 or more groups, where each
+ group is of the form:
+
+ groupDisplayName #counters (false | true counter)*
+
+ where each counter is of the form:
+
+ name value]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a set of groups.]]>
+ </doc>
+ </method>
+ <method name="log"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Logs the current counter values.
+ @param log The log to use.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return textual representation of the counter values.]]>
+ </doc>
+ </method>
+ <method name="makeCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert a counters object into a single line that is easy to parse.
+ @return the string with "name=value" for each counter and separated by ","]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A set of named counters.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> can be of
+ any {@link Enum} type.</p>
+
+ <p><code>Counters</code> are bunched into {@link Group}s, each comprising
+ counters from a particular <code>Enum</code> class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters -->
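+ <!-- Editor's sketch: the enum-keyed counter API documented above. WordStats is
+      a hypothetical application enum; counters are created on first use:
+
+      import org.apache.hadoop.mapred.Counters;
+
+      enum WordStats { GOOD_LINES, BAD_LINES }   // any Enum type can key a counter
+
+      Counters counters = new Counters();
+      counters.incrCounter(WordStats.GOOD_LINES, 1);       // created on first use
+      counters.incrCounter(WordStats.BAD_LINES, 3);
+      long bad = counters.getCounter(WordStats.BAD_LINES); // 3; 0 if never set
+      Counters.Counter c = counters.findCounter(WordStats.GOOD_LINES);
+      c.increment(2);   // the same enum always maps to the same Counter object
+ -->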
+ <!-- start class org.apache.hadoop.mapred.Counters.Counter -->
+ <class name="Counters.Counter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the counter.
+ @return the user facing name of the counter]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[What is the current value of this counter?
+ @return the current value]]>
+ </doc>
+ </method>
+ <method name="increment"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Increment this counter by the given value
+ @param incr the value to increase this counter by]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A counter record, comprising its name and value.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Counter -->
+ <!-- start class org.apache.hadoop.mapred.Counters.Group -->
+ <class name="Counters.Group" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"/>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the raw name of the group. This is the name of the enum class
+ for this group of counters.]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the localized name of the group. This is the same as getName() by
+ default, but different if an appropriate ResourceBundle is found.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="counterName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the specified counter, or 0 if the counter does
+ not exist.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given id and create it if it doesn't exist.
+ @param id the numeric id of the counter within the group
+ @param name the internal counter name
+ @return the counter]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of counters in this group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[<code>Group</code> of counters, comprising counters from a particular
+ counter {@link Enum} class.
+
+ <p><code>Group</code> handles localization of the class name and the
+ counter names.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Group -->
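+ <!-- Editor's sketch: both Counters and Counters.Group implement Iterable (see
+      above), so a nested for-each walks every counter; counters is the instance
+      from the previous sketch:
+
+      for (Counters.Group group : counters) {
+        System.out.println(group.getDisplayName() + " (" + group.size() + ")");
+        for (Counters.Counter counter : group) {
+          System.out.println("  " + counter.getDisplayName()
+              + " = " + counter.getCounter());
+        }
+      }
+ -->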
+ <!-- start class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <class name="DefaultJobHistoryParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DefaultJobHistoryParser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseJobTasks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobHistoryFile" type="java.lang.String"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobHistory.JobInfo"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Populates a JobInfo object from the job's history log file.
+ @param jobHistoryFile history file for this job.
+ @param job a precreated JobInfo object, should be non-null.
+ @param fs FileSystem where historyFile is present.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Default parser for job history files. It creates an object model from
+ the job history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <!-- start class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <class name="FileAlreadyExistsException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileAlreadyExistsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileAlreadyExistsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Used when target file already exists for any operation and
+ is not configured to be overwritten.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <!-- start class org.apache.hadoop.mapred.FileInputFormat -->
+ <class name="FileInputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <constructor name="FileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setMinSplitSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="minSplitSize" type="long"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="filename" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Is the given filename splitable? Usually true, but if the file is
+ stream compressed, it will not be.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split-up
+ so that {@link Mapper}s process entire files.
+
+ @param fs the file system that the file is on
+ @param filename the file name to check
+ @return is this file splitable?]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setInputPathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="filter" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.PathFilter&gt;"/>
+ <doc>
+ <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+
+ @param filter the PathFilter class use for filtering the input paths.]]>
+ </doc>
+ </method>
+ <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get a PathFilter instance of the filter set for the input paths.
+
+ @return the PathFilter instance set for the job, NULL if none has been set.]]>
+ </doc>
+ </method>
+ <method name="listPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression.
+
+ @param job the job to list input paths for
+ @return array of Path objects
+ @throws IOException if zero items.]]>
+ </doc>
+ </method>
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Splits files returned by {@link #listPaths(JobConf)} when
+ they're too big.]]>
+ </doc>
+ </method>
+ <method name="computeSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="goalSize" type="long"/>
+ <param name="minSize" type="long"/>
+ <param name="blockSize" type="long"/>
+ </method>
+ <method name="getBlockIndex" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+ <param name="offset" type="long"/>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the given comma separated paths as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be set as
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add the given comma separated paths to the list of inputs for
+ the map-reduce job.
+
+ @param conf The configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be added to
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job.
+ @param inputPaths the {@link Path}s of the input directories/files
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @param conf The configuration of the job
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class for file-based {@link InputFormat}.
+
+ <p><code>FileInputFormat</code> is the base class for all file-based
+ <code>InputFormat</code>s. This provides generic implementations of
+ {@link #validateInput(JobConf)} and {@link #getSplits(JobConf, int)}.
+ Implementations of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(FileSystem, Path)} method to ensure input-files are
+ not split-up and are processed as a whole by {@link Mapper}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileInputFormat -->
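+ <!-- Editor's sketch: configuring job inputs with the static helpers documented
+      above. The JobConf and paths are illustrative:
+
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.FileInputFormat;
+      import org.apache.hadoop.mapred.JobConf;
+
+      JobConf job = new JobConf();
+      // Replace any previously configured inputs, then append one more path.
+      FileInputFormat.setInputPaths(job, new Path[] { new Path("/data/2010/11") });
+      FileInputFormat.addInputPath(job, new Path("/data/2010/12"));
+      // The comma-separated String form is equivalent:
+      // FileInputFormat.setInputPaths(job, "/data/2010/11,/data/2010/12");
+ -->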
+ <!-- start class org.apache.hadoop.mapred.FileOutputFormat -->
+ <class name="FileOutputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="FileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param conf The configuration of the job.
+ @param outputDir the {@link Path} of the output directory for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(JobConf)]]>
+ </doc>
+ </method>
+ <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the task's temporary output directory
+ for the map-reduce job.
+
+ <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
+
+ <p>Some applications need to create/write-to side-files, which differ from
+ the actual job-outputs.
+
+ <p>In such cases there could be issues with 2 instances of the same TIP
+ (running simultaneously e.g. speculative tasks) trying to open/write-to the
+ same file (path) on HDFS. Hence the application-writer will have to pick
+ unique names per task-attempt (e.g. using the taskid, say
+ <tt>task_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
+
+ <p>To get around this the Map-Reduce framework helps the application-writer
+ out by maintaining a special
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>
+ sub-directory for each task-attempt on HDFS where the output of the
+ task-attempt goes. On successful completion of the task-attempt the files
+ in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only)
+ are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the
+ framework discards the sub-directory of unsuccessful task-attempts. This
+ is completely transparent to the application.</p>
+
+ <p>The application-writer can take advantage of this by creating any
+ side-files required in <tt>${mapred.work.output.dir}</tt> during execution
+ of a reduce task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the
+ framework will move them out similarly; thus the application-writer doesn't
+ have to pick unique paths per task-attempt.</p>
+
+ <p><i>Note</i>: the value of <tt>${mapred.work.output.dir}</tt> during
+ execution of a particular task-attempt is actually
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>, and this value is
+ set by the map-reduce framework. So, just create any side-files in the
+ path returned by {@link #getWorkOutputPath(JobConf)} from map/reduce
+ task to take advantage of this feature.</p>
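+ 
+ <p>A short sketch (names are illustrative) of writing a side-file into the
+ work output directory from within a task, so that the framework promotes it
+ on successful completion:</p>
+ <p><blockquote><pre>
+ // 'job' is the JobConf handed to the task
+ Path workDir = FileOutputFormat.getWorkOutputPath(job);
+ Path sideFile = new Path(workDir, "side-data");  // no per-attempt suffix needed
+ FSDataOutputStream out = FileSystem.get(job).create(sideFile);
+ out.writeUTF("auxiliary output");
+ out.close();
+ </pre></blockquote></p>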
+
+ <p>The entire discussion holds true for maps of jobs with
+ reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
+ goes directly to HDFS.</p>
+
+ @return the {@link Path} to the task's temporary output directory
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base class for {@link OutputFormat}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.FileSplit -->
+ <class name="FileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[Constructs a split.
+ @deprecated
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process]]>
+ </doc>
+ </constructor>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a split with host information.
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+ </doc>
+ </constructor>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file containing this split's data.]]>
+ </doc>
+ </method>
+ <method name="getStart" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The position of the first byte in the file to process.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the file to process.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A section of an input file. Returned by {@link
+ InputFormat#getSplits(JobConf, int)} and passed to
+ {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileSplit -->
+ <!-- start interface org.apache.hadoop.mapred.InputFormat -->
+ <interface name="InputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check for validity of the input-specification for the job.
+
+ <p>This method is used to validate the input directories when a job is
+ submitted so that the {@link JobClient} can fail early, with a useful
+ error message, in case of errors, e.g. when an input directory does not
+ exist.</p>
+
+ @param job job configuration.
+ @throws InvalidInputException if the job does not have valid input]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically split the set of input files for the job.
+
+ <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
+ for processing.</p>
+
+ <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
+ input files are not physically split into chunks. For example, a split could
+ be an <i>&lt;input-file-path, start, offset&gt;</i> tuple.</p>
+
+ @param job job configuration.
+ @param numSplits the desired number of splits, a hint.
+ @return an array of {@link InputSplit}s for the job.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordReader} for the given {@link InputSplit}.
+
+ <p>It is the responsibility of the <code>RecordReader</code> to respect
+ record boundaries while processing the logical split to present a
+ record-oriented view to the individual task.</p>
+
+ @param split the {@link InputSplit}
+ @param job the job that this split belongs to
+ @return a {@link RecordReader}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputFormat</code> describes the input-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the input-specification of the job.
+ </li>
+ <li>
+ Split-up the input file(s) into logical {@link InputSplit}s, each of
+ which is then assigned to an individual {@link Mapper}.
+ </li>
+ <li>
+ Provide the {@link RecordReader} implementation to be used to glean
+ input records from the logical <code>InputSplit</code> for processing by
+ the {@link Mapper}.
+ </li>
+ </ol>
+
+ <p>The default behavior of file-based {@link InputFormat}s, typically
+ sub-classes of {@link FileInputFormat}, is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of the input files. However, the {@link FileSystem} blocksize of
+ the input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
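+ 
+ <p>For instance, a lower bound on split sizes can be set on the job
+ configuration (a sketch; the property name is the one referenced above):</p>
+ <p><blockquote><pre>
+ JobConf job = new JobConf(MyJob.class);  // MyJob is a placeholder
+ // ask for splits of at least 64 MB
+ job.setLong("mapred.min.split.size", 64 * 1024 * 1024);
+ </pre></blockquote></p>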
+
+ <p>Clearly, logical splits based on input-size are insufficient for many
+ applications since record boundaries must be respected. In such cases, the
+ application also has to implement a {@link RecordReader}, on which lies the
+ responsibility to respect record-boundaries and present a record-oriented
+ view of the logical <code>InputSplit</code> to the individual task.</p>
+
+ @see InputSplit
+ @see RecordReader
+ @see JobClient
+ @see FileInputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.InputSplit -->
+ <interface name="InputSplit" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the total number of bytes in the data of the <code>InputSplit</code>.
+
+ @return the number of bytes in the input split.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hostnames where the input split is located.
+
+ @return list of hostnames where data of the <code>InputSplit</code> is
+ located as an array of <code>String</code>s.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputSplit</code> represents the data to be processed by an
+ individual {@link Mapper}.
+
+ <p>Typically, it presents a byte-oriented view of the input, and it is the
+ responsibility of the {@link RecordReader} of the job to process this and
+ present a record-oriented view.</p>
+
+ @see InputFormat
+ @see RecordReader]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputSplit -->
+ <!-- start class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <class name="InvalidFileTypeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidFileTypeException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidFileTypeException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Used when the file type differs from the desired file type,
+ e.g. getting a file when a directory is expected.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidInputException -->
+ <class name="InvalidInputException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidInputException" type="java.util.List&lt;java.io.IOException&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create the exception with the given list.
+ @param probs the list of problems to report. This list is not copied.]]>
+ </doc>
+ </constructor>
+ <method name="getProblems" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete list of the problems reported.
+ @return the list of problems, which must not be modified]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get a summary message of the problems found.
+ @return the concatenated messages from all of the problems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class wraps a list of problems with the input, so that the user
+ can get a list of problems together instead of finding and fixing them one
+ by one.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidInputException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <class name="InvalidJobConfException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidJobConfException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidJobConfException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when the jobconf is missing some mandatory
+ attributes or the value of some attributes is invalid.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <!-- start class org.apache.hadoop.mapred.IsolationRunner -->
+ <class name="IsolationRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IsolationRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Run a single task.
+ @param args the first argument is the task directory]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.IsolationRunner -->
+ <!-- start class org.apache.hadoop.mapred.JobClient -->
+ <class name="JobClient" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobClient"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job client.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client with the given {@link JobConf}, and connect to the
+ default {@link JobTracker}.
+
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client, connect to the indicated job tracker.
+
+ @param jobTrackAddr the job tracker to connect to.
+ @param conf configuration.]]>
+ </doc>
+ </constructor>
+ <method name="getCommandLineConfig" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the command line configuration.]]>
+ </doc>
+ </method>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Connect to the default {@link JobTracker}.
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the <code>JobClient</code>.]]>
+ </doc>
+ </method>
+ <method name="getFs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a filesystem handle. We need this to prepare jobs
+ for submission to the MapReduce system.
+
+ @return the filesystem handle.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobFile" type="java.lang.String"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param jobFile the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param job the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a {@link RunningJob} object to track an ongoing job. Returns
+ null if the id does not correspond to any known job.
+
+ @param jobid the jobid of the job.
+ @return the {@link RunningJob} handle to track the job, null if the
+ <code>jobid</code> doesn't correspond to any known job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the map tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the map tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the reduce tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the reduce tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the Map-Reduce cluster.
+
+ @return the status information about the Map-Reduce cluster as an object
+ of {@link ClusterStatus}.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are not completed and not failed.
+
+ @return array of {@link JobStatus} for the running/to-be-run jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are submitted.
+
+ @return array of {@link JobStatus} for the submitted jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Utility that submits a job, then polls for progress until the job is
+ complete.
+
+ @param job the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Sets the output filter for tasks. Only those tasks whose output
+ matches the filter are printed.
+ @param newValue task filter.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the task output filter out of the JobConf.
+
+ @param job the JobConf to examine.
+ @return the filter level.]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Modify the JobConf to set the task output filter.
+
+ @param job the JobConf to modify.
+ @param newValue the value to set.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the task output filter.
+ @return task filter.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[<code>JobClient</code> is the primary interface for the user-job to interact
+ with the {@link JobTracker}.
+
+ <code>JobClient</code> provides facilities to submit jobs, track their
+ progress, access component-tasks' reports/logs, get the Map-Reduce cluster
+ status information etc.
+
+ <p>The job submission process involves:
+ <ol>
+ <li>
+ Checking the input and output specifications of the job.
+ </li>
+ <li>
+ Computing the {@link InputSplit}s for the job.
+ </li>
+ <li>
+ Setting up the requisite accounting information for the {@link DistributedCache}
+ of the job, if necessary.
+ </li>
+ <li>
+ Copying the job's jar and configuration to the map-reduce system directory
+ on the distributed file-system.
+ </li>
+ <li>
+ Submitting the job to the <code>JobTracker</code> and optionally monitoring
+ its status.
+ </li>
+ </ol></p>
+
+ Normally the user creates the application, describes various facets of the
+ job via {@link JobConf} and then uses the <code>JobClient</code> to submit
+ the job and monitor its progress.
+
+ <p>Here is an example on how to use <code>JobClient</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ job.setInputPath(new Path("in"));
+ job.setOutputPath(new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ </pre></blockquote></p>
+
+ <h4 id="JobControl">Job Control</h4>
+
+ <p>At times clients need to chain map-reduce jobs to accomplish complex tasks
+ which cannot be done via a single map-reduce job. This is fairly easy since
+ the output of the job typically goes to the distributed file-system, and that
+ can be used as the input for the next job.</p>
+
+ <p>However, this also means that the onus of ensuring jobs are complete
+ (success/failure) lies squarely on the clients. In such situations the
+ various job-control options are:
+ <ol>
+ <li>
+ {@link #runJob(JobConf)} : submits the job and returns only after
+ the job has completed.
+ </li>
+ <li>
+ {@link #submitJob(JobConf)} : only submits the job; the client then polls
+ the returned handle to the {@link RunningJob} to query status and make
+ scheduling decisions, as in the sketch below.
+ </li>
+ <li>
+ {@link JobConf#setJobEndNotificationURI(String)} : sets up a notification
+ on job-completion, thus avoiding polling.
+ </li>
+ </ol></p>
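+ 
+ <p>A sketch of the second option, polling the returned handle (assuming the
+ standard {@link RunningJob} status methods):</p>
+ <p><blockquote><pre>
+ JobClient client = new JobClient(job);
+ RunningJob running = client.submitJob(job);
+ // poll until the job finishes, then check the outcome
+ // (exception handling elided in this sketch)
+ while (!running.isComplete()) {
+   Thread.sleep(5000);
+ }
+ if (!running.isSuccessful()) {
+   // react to the failure, e.g. skip jobs that depend on this one
+ }
+ </pre></blockquote></p>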
+
+ @see JobConf
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient -->
+ <!-- start class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <class name="JobClient.TaskStatusFilter" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobClient.TaskStatusFilter&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <!-- start class org.apache.hadoop.mapred.JobConf -->
+ <class name="JobConf" extends="org.apache.hadoop.conf.Configuration"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <method name="getJar" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user jar for the map-reduce job.
+
+ @return the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJar"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jar" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user jar for the map-reduce job.
+
+ @param jar the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJarByClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the job's jar file by finding an example class location.
+
+ @param cls the example class.]]>
+ </doc>
+ </method>
+ <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the system directory where job-specific files are to be placed.
+
+ @return the system directory where job-specific files are to be placed.]]>
+ </doc>
+ </method>
+ <method name="getLocalDirs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="subdir" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a local file name. Files are distributed among configured
+ local directories.]]>
+ </doc>
+ </method>
+ <method name="setInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or
+ {@link FileInputFormat#setInputPaths(JobConf, String)}">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the input directory for the map-reduce job.
+
+ @param dir the {@link Path} of the input directory for the map-reduce job.
+ @deprecated Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or
+ {@link FileInputFormat#setInputPaths(JobConf, String)}]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#addInputPath(JobConf, Path)} or
+ {@link FileInputFormat#addInputPaths(JobConf, String)}">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
+ @param dir {@link Path} to be added to the list of inputs for
+ the map-reduce job.
+ @deprecated Use {@link FileInputFormat#addInputPath(JobConf, Path)} or
+ {@link FileInputFormat#addInputPaths(JobConf, String)}]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#getInputPaths(JobConf)}">
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @return the list of input {@link Path}s for the map-reduce job.
+ @deprecated Use {@link FileInputFormat#getInputPaths(JobConf)}]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reported username for this job.
+
+ @return the username]]>
+ </doc>
+ </method>
+ <method name="setUser"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the reported username for this job.
+
+ @param user the username for this job.]]>
+ </doc>
+ </method>
+ <method name="setKeepFailedTaskFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the framework should keep the intermediate files for
+ failed tasks.
+
+ @param keep <code>true</code> if framework should keep the intermediate files
+ for failed tasks, <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="getKeepFailedTaskFiles" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should the temporary files for failed tasks be kept?
+
+ @return should the files be kept?]]>
+ </doc>
+ </method>
+ <method name="setKeepTaskFilesPattern"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pattern" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set a regular expression for task names that should be kept.
+ The regular expression ".*_m_000123_0" would keep the files
+ for the first instance of map 123 that ran.
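+ 
+ <p>For instance (a sketch; <tt>job</tt> is the {@link JobConf} being set up):</p>
+ <p><blockquote><pre>
+ job.setKeepTaskFilesPattern(".*_m_000123_0");
+ </pre></blockquote></p>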
+
+ @param pattern the java.util.regex.Pattern to match against the
+ task names.]]>
+ </doc>
+ </method>
+ <method name="getKeepTaskFilesPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the regular expression that is matched against the task names
+ to see if we need to keep the files.
+
+ @return the pattern as a string, if it was set, otherwise null.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the default file system.
+
+ @param dir the new current working directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the default file system.
+
+ @return the directory name.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat#getOutputPath(JobConf)} or
+ {@link FileOutputFormat#getWorkOutputPath(JobConf)}
+ Get the {@link Path} to the output directory for the map-reduce job.">
+ <doc>
+ <![CDATA[@deprecated Use {@link FileOutputFormat#getOutputPath(JobConf)} or
+ {@link FileOutputFormat#getWorkOutputPath(JobConf)}
+ Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat#setOutputPath(JobConf, Path)}
+ Set the {@link Path} of the output directory for the map-reduce job.
+
+ &lt;p>&lt;i>Note&lt;/i>:
+ &lt;/p>">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[@deprecated Use {@link FileOutputFormat#setOutputPath(JobConf, Path)}
+ Set the {@link Path} of the output directory for the map-reduce job.
+
+ <p><i>Note</i>:
+ </p>
+ @param dir the {@link Path} of the output directory for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputFormat" return="org.apache.hadoop.mapred.InputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link InputFormat} implementation for the map-reduce job,
+ defaulting to {@link TextInputFormat} if not specified explicitly.
+
+ @return the {@link InputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link InputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link InputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="getOutputFormat" return="org.apache.hadoop.mapred.OutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link OutputFormat} implementation for the map-reduce job,
+ defaulting to {@link TextOutputFormat} if not specified explicitly.
+
+ @return the {@link OutputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setOutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link OutputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link OutputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="setCompressMapOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the map outputs are to be compressed before transfer,
+ using SequenceFile compression.
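+ 
+ <p>A sketch of enabling map-output compression together with a codec
+ (assuming the stock <tt>org.apache.hadoop.io.compress.GzipCodec</tt>):</p>
+ <p><blockquote><pre>
+ job.setCompressMapOutput(true);
+ job.setMapOutputCompressorClass(GzipCodec.class);
+ </pre></blockquote></p>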
+
+ @param compress should the map outputs be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressMapOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Are the outputs of the maps to be compressed?
+
+ @return <code>true</code> if the outputs of the maps are to be compressed,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the map outputs.
+
+ @param style the {@link CompressionType} to control how the map outputs
+ are compressed.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the map outputs.
+
+ @return the {@link CompressionType} for map outputs, defaulting to
+ {@link CompressionType#RECORD}.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the given class as the {@link CompressionCodec} for the map outputs.
+
+ @param codecClass the {@link CompressionCodec} class that will compress
+ the map outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the map outputs.
+
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} class that should be used to compress the
+ map outputs.
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getMapOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the map output data. If it is not set, use the
+ (final) output key class. This allows the map output key class to be
+ different than the final output key class.
+
+ @return the map output key class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the map output data. This allows the user to
+ specify the map output key class to be different than the final output
+ key class.
+
+ @param theClass the map output key class.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for the map output data. If it is not set, use the
+ (final) output value class. This allows the map output value class to be
+ different than the final output value class.
+
+ @return the map output value class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for the map output data. This allows the user to
+ specify the map output value class to be different than the final output
+ value class.
+
+ @param theClass the map output value class.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the job output data.
+
+ @return the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the job output data.
+
+ @param theClass the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link RawComparator} comparator used to compare keys.
+
+ @return the {@link RawComparator} comparator used to compare keys.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyComparatorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link RawComparator} comparator used to compare keys.
+
+ @param theClass the {@link RawComparator} comparator used to
+ compare keys.
+ @see #setOutputValueGroupingComparator(Class)]]>
+ </doc>
+ </method>
+ <method name="getOutputValueGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-defined {@link WritableComparable} comparator for
+ grouping keys of inputs to the reduce.
+
+ @return comparator set by the user for grouping values.
+ @see #setOutputValueGroupingComparator(Class) for details.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueGroupingComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the user-defined {@link RawComparator} comparator for
+ grouping keys in the input to the reduce.
+
+ <p>This comparator should be provided if the equivalence rules for keys
+ for sorting the intermediates are different from those for grouping keys
+ before each call to
+ {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
+
+ <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
+ in a single call to the reduce function if K1 and K2 compare as equal.</p>
+
+ <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
+ how keys are sorted, this can be used in conjunction to simulate
+ <i>secondary sort on values</i>.</p>
+
+ <p><i>Note</i>: This is not a guarantee of the reduce sort being
+ <i>stable</i> in any sense. (In any case, with the order of available
+ map-outputs to the reduce being non-deterministic, it wouldn't make
+ that much sense.)</p>
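+ 
+ <p>A sketch of wiring up such a <i>secondary sort</i>; both comparator
+ classes are placeholders the application would supply:</p>
+ <p><blockquote><pre>
+ // sort on the full (primary, secondary) key
+ job.setOutputKeyComparatorClass(FullKeyComparator.class);
+ // but group reduce inputs on the primary part only
+ job.setOutputValueGroupingComparator(PrimaryKeyComparator.class);
+ </pre></blockquote></p>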
+
+ @param theClass the comparator class to be used for grouping keys.
+ It should implement <code>RawComparator</code>.
+ @see #setOutputKeyComparatorClass(Class)]]>
+ </doc>
+ </method>
+ <method name="getOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for job outputs.
+
+ @return the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for job outputs.
+
+ @param theClass the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapperClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Mapper} class for the job.
+
+ @return the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapperClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Mapper} class for the job.
+
+ @param theClass the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getMapRunnerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link MapRunnable} class for the job.
+
+ @return the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapRunnerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"/>
+ <doc>
+ <![CDATA[Expert: Set the {@link MapRunnable} class for the job.
+
+ Typically used to exert greater control on {@link Mapper}s.
+
+ @param theClass the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getPartitionerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Partitioner} used to partition {@link Mapper}-outputs
+ to be sent to the {@link Reducer}s.
+
+ @return the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setPartitionerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Partitioner} class used to partition
+ {@link Mapper}-outputs to be sent to the {@link Reducer}s.
+
+ @param theClass the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getReducerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Reducer} class for the job.
+
+ @return the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setReducerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Reducer} class for the job.
+
+ @param theClass the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getCombinerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-defined <i>combiner</i> class used to combine map-outputs
+         before being sent to the reducers. Typically the combiner is the same as
+         the {@link Reducer} for the job i.e. {@link #getReducerClass()}.
+
+ @return the user-defined combiner class used to combine map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCombinerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers.
+
+ <p>The combiner is a task-level aggregation operation which, in some cases,
+ helps to cut down the amount of data transferred from the {@link Mapper} to
+ the {@link Reducer}, leading to better performance.</p>
+
+         <p>Typically the combiner is the same as the <code>Reducer</code> for the
+ job i.e. {@link #setReducerClass(Class)}.</p>
+
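+         <p>For example, a minimal sketch reusing the job's reducer as the combiner,
+         as is typical (<code>MyJob.MyReducer</code> is a hypothetical class name):</p>
+         <p><blockquote><pre>
+     job.setReducerClass(MyJob.MyReducer.class);
+     job.setCombinerClass(MyJob.MyReducer.class);
+         </pre></blockquote></p>
+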
+ @param theClass the user-defined combiner class used to combine
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job?
+ Defaults to <code>true</code>.
+
+         @return <code>true</code> if speculative execution is to be used for this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on, else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getMapSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for map tasks?
+ Defaults to <code>true</code>.
+
+         @return <code>true</code> if speculative execution is to be
+ used for this job for map tasks,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for map tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for map tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getReduceSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for reduce tasks?
+ Defaults to <code>true</code>.
+
+         @return <code>true</code> if speculative execution is to be used
+ for reduce tasks for this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setReduceSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for reduce tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for reduce tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getNumMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Get the configured number of map tasks for this job.
+ Defaults to <code>1</code>.
+
+ @param n the number of map tasks for this job.
+ @return the number of map tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumMapTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the number of map tasks for this job.
+
+ <p><i>Note</i>: This is only a <i>hint</i> to the framework. The actual
+ number of spawned map tasks depends on the number of {@link InputSplit}s
+ generated by the job's {@link InputFormat#getSplits(JobConf, int)}.
+
+ A custom {@link InputFormat} is typically used to accurately control
+ the number of map tasks for the job.</p>
+
+ <h4 id="NoOfMaps">How many maps?</h4>
+
+ <p>The number of maps is usually driven by the total size of the inputs
+ i.e. total number of blocks of the input files.</p>
+
+ <p>The right level of parallelism for maps seems to be around 10-100 maps
+ per-node, although it has been set up to 300 or so for very cpu-light map
+ tasks. Task setup takes a while, so it is best if the maps take at least a
+ minute to execute.</p>
+
+ <p>The default behavior of file-based {@link InputFormat}s is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of input files. However, the {@link FileSystem} blocksize of the
+ input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB,
+ you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is
+ used to set it even higher.</p>
+
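+ <p>For example, a minimal sketch of hinting at a larger number of maps
+ (the <code>16</code> is an illustrative value, not a recommendation):</p>
+ <p><blockquote><pre>
+     JobConf job = new JobConf(new Configuration(), MyJob.class);
+     job.setNumMapTasks(16);
+ </pre></blockquote></p>
+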
+ @param n the number of map tasks for this job.
+ @see InputFormat#getSplits(JobConf, int)
+ @see FileInputFormat
+ @see FileSystem#getDefaultBlockSize()
+ @see FileStatus#getBlockSize()]]>
+ </doc>
+ </method>
+ <method name="getNumReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Get the configured number of reduce tasks for this job. Defaults to
+ <code>1</code>.
+
+ @return the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumReduceTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the requisite number of reduce tasks for this job.
+
+ <h4 id="NoOfReduces">How many reduces?</h4>
+
+ <p>The right number of reduces seems to be <code>0.95</code> or
+ <code>1.75</code> multiplied by (&lt;<i>no. of nodes</i>&gt; *
+ <a href="{@docRoot}/../hadoop-default.html#mapred.tasktracker.reduce.tasks.maximum">
+ mapred.tasktracker.reduce.tasks.maximum</a>).
+ </p>
+
+ <p>With <code>0.95</code> all of the reduces can launch immediately and
+ start transferring map outputs as the maps finish. With <code>1.75</code>
+ the faster nodes will finish their first round of reduces and launch a
+ second wave of reduces doing a much better job of load balancing.</p>
+
+ <p>Increasing the number of reduces increases the framework overhead, but
+ improves load balancing and lowers the cost of failures.</p>
+
+ <p>The scaling factors above are slightly less than whole numbers to
+ reserve a few reduce slots in the framework for speculative-tasks, failures
+ etc.</p>
+
+ <h4 id="ReducerNone">Reducer NONE</h4>
+
+ <p>It is legal to set the number of reduce-tasks to <code>zero</code>.</p>
+
+ <p>In this case the outputs of the map-tasks go directly to the distributed
+ file-system, to the path set by
+ {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the
+ framework doesn't sort the map-outputs before writing them out to HDFS.</p>
+
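+ <p>For example, applying the <code>0.95</code> factor above to a hypothetical
+ cluster of 10 nodes with <code>mapred.tasktracker.reduce.tasks.maximum</code>
+ set to 2:</p>
+ <p><blockquote><pre>
+     int nodes = 10;
+     int reduceSlotsPerNode = 2;
+     job.setNumReduceTasks((int) (0.95 * nodes * reduceSlotsPerNode)); // 19 reduces
+ </pre></blockquote></p>
+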
+ @param n the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Get the configured maximum number of attempts that will be made to run a
+ map task, as specified by the <code>mapred.map.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+       <![CDATA[Expert: Set the maximum number of attempts that will be made to run a
+ map task.
+
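+ <p>For example (the <code>8</code> is an illustrative value; this overrides
+ the <code>mapred.map.max.attempts</code> property):</p>
+ <p><blockquote><pre>
+     job.setMaxMapAttempts(8);
+ </pre></blockquote></p>
+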
+ @param n the number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Get the configured maximum number of attempts that will be made to run a
+ reduce task, as specified by the <code>mapred.reduce.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+       <![CDATA[Expert: Set the maximum number of attempts that will be made to run a
+ reduce task.
+
+ @param n the number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name. This is only used to identify the
+ job to the user.
+
+ @return the job's name, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified job name.
+
+ @param name the job's new name.]]>
+ </doc>
+ </method>
+ <method name="getSessionId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified session identifier. The default is the empty string.
+
+ The session identifier is used to tag metric data that is reported to some
+ performance metrics system via the org.apache.hadoop.metrics API. The
+ session identifier is intended, in particular, for use by Hadoop-On-Demand
+ (HOD) which allocates a virtual Hadoop cluster dynamically and transiently.
+ HOD will set the session identifier by modifying the hadoop-site.xml file
+ before starting the cluster.
+
+ When not running under HOD, this identifier is expected to remain set to
+ the empty string.
+
+ @return the session identifier, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setSessionId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sessionId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified session identifier.
+
+ @param sessionId the new session id.]]>
+ </doc>
+ </method>
+ <method name="setMaxTaskFailuresPerTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="noFailures" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds <code>noFailures</code>, the
+ tasktracker is <i>blacklisted</i> for this job.
+
+ @param noFailures maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxTaskFailuresPerTracker" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Expert: Get the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds this, the tasktracker is
+ <i>blacklisted</i> for this job.
+
+ @return the maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of map tasks that can fail without
+ the job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed map-task results in
+ the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the maximum percentage of map tasks that can fail without the
+ job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts} attempts
+ before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of reduce tasks that can fail without
+ the job being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed reduce-task results
+ in the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum percentage of reduce tasks that can fail without the job
+ being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="prio" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Set {@link JobPriority} for this job.
+
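+ <p>For example, assuming the <code>HIGH</code> priority level is desired:</p>
+ <p><blockquote><pre>
+     job.setJobPriority(JobPriority.HIGH);
+ </pre></blockquote></p>
+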
+ @param prio the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link JobPriority} for this job.
+
+ @return the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getProfileEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Get whether task profiling is enabled.
+ @return true if some tasks will be profiled]]>
+ </doc>
+ </method>
+ <method name="setProfileEnabled"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the system should collect profiler information for some of
+ the tasks in this job. The information is stored in the user log
+ directory.
+ @param newValue true means it should be gathered]]>
+ </doc>
+ </method>
+ <method name="getProfileParams" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the profiler configuration arguments.
+
+ The default value for this property is
+ "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
+
+ @return the parameters to pass to the task child to configure profiling]]>
+ </doc>
+ </method>
+ <method name="setProfileParams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the profiler configuration arguments. If the string contains a '%s' it
+ will be replaced with the name of the profiling output file when the task
+ runs.
+
+ This value is passed to the task child JVM on the command line.
+
+ @param value the configuration string]]>
+ </doc>
+ </method>
+ <method name="getProfileTaskRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <doc>
+ <![CDATA[Get the range of maps or reduces to profile.
+ @param isMap is the task a map?
+ @return the task ranges]]>
+ </doc>
+ </method>
+ <method name="setProfileTaskRange"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <param name="newValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the ranges of maps or reduces to profile. setProfileEnabled(true)
+ must also be called.
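+
+ <p>For example, a minimal sketch wiring the profiling knobs together (the
+ range strings are illustrative):</p>
+ <p><blockquote><pre>
+     job.setProfileEnabled(true);
+     job.setProfileTaskRange(true, "0-2"); // profile map tasks 0, 1 and 2
+     job.setProfileTaskRange(false, "0");  // profile reduce task 0
+ </pre></blockquote></p>
+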
+ @param isMap whether the ranges apply to map tasks (reduce tasks otherwise)
+ @param newValue a set of integer ranges of the task ids]]>
+ </doc>
+ </method>
+ <method name="setMapDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the map tasks fail.
+
+ <p>The debug script can aid debugging of failed map tasks. The script is
+ given the task's stdout, stderr, syslog and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the map failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script needs to be symlinked. </p>
+
+ <p>Here is an example of how to submit a script:</p>
+ <p><blockquote><pre>
+ job.setMapDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param mDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getMapDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the map task's debug script.
+
+ @return the debug Script for the mapred job for failed map tasks.
+ @see #setMapDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="setReduceDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the reduce tasks fail.
+
+ <p>The debug script can aid debugging of failed reduce tasks. The script
+ is given the task's stdout, stderr, syslog and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the reduce failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script file needs to be symlinked. </p>
+
+ <p>Here is an example of how to submit a script:</p>
+ <p><blockquote><pre>
+ job.setReduceDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param rDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getReduceDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Get the reduce task's debug script.
+
+ @return the debug script for the mapred job for failed reduce tasks.
+ @see #setReduceDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="getJobEndNotificationURI" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Get the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ @return the job end notification uri, <code>null</code> if it hasn't
+ been set.
+ @see #setJobEndNotificationURI(String)]]>
+ </doc>
+ </method>
+ <method name="setJobEndNotificationURI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+       <![CDATA[Set the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ <p>The uri can contain 2 special parameters: <tt>$jobId</tt> and
+ <tt>$jobStatus</tt>. Those, if present, are replaced by the job's
+ identifier and completion-status respectively.</p>
+
+ <p>This is typically used by application-writers to implement chaining of
+ Map-Reduce jobs in an <i>asynchronous manner</i>.</p>
+
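+ <p>For example (a hypothetical endpoint; the framework substitutes the two
+ special parameters at completion time):</p>
+ <p><blockquote><pre>
+     job.setJobEndNotificationURI(
+         "http://myserver.example.com/jobdone?id=$jobId&amp;status=$jobStatus");
+ </pre></blockquote></p>
+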
+ @param uri the job end notification uri
+ @see JobStatus
+ @see <a href="{@docRoot}/org/apache/hadoop/mapred/JobClient.html#JobCompletionAndChaining">Job Completion and Chaining</a>]]>
+ </doc>
+ </method>
+ <method name="getJobLocalDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Get the job-specific shared directory for use as scratch space.
+
+ <p>
+ When a job starts, a shared directory is created at location
+ <code>${mapred.local.dir}/taskTracker/jobcache/$jobid/work/</code>.
+ This directory is exposed to the users through
+ <code>job.local.dir</code>,
+ so the tasks can use this space
+ as scratch space and share files among them.</p>
+ This value is also available as a System property.
+
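+ <p>For example, from within a running task, the same directory can be read
+ either way (assuming the System property carries the same
+ <code>job.local.dir</code> name):</p>
+ <p><blockquote><pre>
+     String scratch = job.getJobLocalDir();
+     String viaProperty = System.getProperty("job.local.dir");
+ </pre></blockquote></p>
+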
+ @return The localized job specific shared directory]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A map/reduce job configuration.
+
+ <p><code>JobConf</code> is the primary interface for a user to describe a
+ map-reduce job to the Hadoop framework for execution. The framework tries to
+ faithfully execute the job as described by <code>JobConf</code>, however:
+ <ol>
+ <li>
+ Some configuration parameters might have been marked as
+ <a href="{@docRoot}/org/apache/hadoop/conf/Configuration.html#FinalParams">
+ final</a> by administrators and hence cannot be altered.
+ </li>
+ <li>
+       While some job parameters are straightforward to set
+       (e.g. {@link #setNumReduceTasks(int)}), some parameters interact subtly with the
+       rest of the framework and/or job-configuration and are relatively more
+       complex for the user to control finely (e.g. {@link #setNumMapTasks(int)}).
+ </li>
+ </ol></p>
+
+ <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner
+ (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and
+ {@link OutputFormat} implementations to be used etc.</p>
+
+ <p>Optionally <code>JobConf</code> is used to specify other advanced facets
+ of the job such as <code>Comparator</code>s to be used, files to be put in
+ the {@link DistributedCache}, whether or not intermediate and/or job outputs
+ are to be compressed (and how), and debuggability via user-provided scripts
+ ({@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)})
+ for doing post-processing on task logs, the task's stdout, stderr and
+ syslog, etc.</p>
+
+ <p>Here is an example on how to configure a job via <code>JobConf</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ FileInputFormat.setInputPaths(job, new Path("in"));
+ FileOutputFormat.setOutputPath(job, new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setCombinerClass(MyJob.MyReducer.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ job.setInputFormat(SequenceFileInputFormat.class);
+ job.setOutputFormat(SequenceFileOutputFormat.class);
+ </pre></blockquote></p>
+
+ @see JobClient
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobConf -->
+ <!-- start interface org.apache.hadoop.mapred.JobConfigurable -->
+ <interface name="JobConfigurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Initializes a new instance from a {@link JobConf}.
+
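+ <p>A minimal sketch of an implementor (<code>MyConfigurable</code> and the
+ <code>my.app.limit</code> key are hypothetical):</p>
+ <p><blockquote><pre>
+     public class MyConfigurable implements JobConfigurable {
+       private int limit;
+       public void configure(JobConf job) {
+         limit = job.getInt("my.app.limit", 10);
+       }
+     }
+ </pre></blockquote></p>
+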
+ @param job the configuration]]>
+ </doc>
+ </method>
+ <doc>
+       <![CDATA[Something that may be configured with a {@link JobConf}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobConfigurable -->
+ <!-- start class org.apache.hadoop.mapred.JobEndNotifier -->
+ <class name="JobEndNotifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobEndNotifier"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="startNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="stopNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="registerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ <method name="localRunnerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobEndNotifier -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory -->
+ <class name="JobHistory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="hostname" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Initialize JobHistory files.
+ @param conf JobConf of the job tracker.
+ @param hostname jobtracker's hostname
+ @return true if initialized properly,
+         false otherwise]]>
+ </doc>
+ </method>
+ <method name="parseHistoryFromFS"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="l" type="org.apache.hadoop.mapred.JobHistory.Listener"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+       <![CDATA[Parses a history file and invokes Listener.handle() for
+ each line of history. It can be used for looking through history
+ files for specific items without having to keep the whole history in memory.
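+
+ <p>For example, a minimal sketch of a listener that just counts records (the
+ history path is hypothetical, and a <code>Configuration conf</code> is assumed
+ to be in scope):</p>
+ <p><blockquote><pre>
+     JobHistory.parseHistoryFromFS("/history/jt1_job_200901010000_0001",
+         new JobHistory.Listener() {
+           private int records = 0;
+           public void handle(JobHistory.RecordTypes recType,
+                              Map&lt;JobHistory.Keys, String&gt; values)
+               throws IOException {
+             records++;
+           }
+         }, FileSystem.get(conf));
+ </pre></blockquote></p>
+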
+ @param path path to history file
+ @param l Listener for history events
+ @param fs FileSystem where history file is present
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isDisableHistory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Returns the history disable status. By default history is enabled, so this
+ method returns false.
+ @return true if history logging is disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="setDisableHistory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="disableHistory" type="boolean"/>
+ <doc>
+ <![CDATA[Enable/disable history logging. Default value is false, so history
+ is enabled by default.
+ @param disableHistory true if history should be disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JOBTRACKER_START_TIME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+       <![CDATA[Provides methods for writing to and reading from job history.
+ Job history works in an append mode; JobHistory and its inner classes provide methods
+ to log job events.
+
+ JobHistory is split into multiple files; the format of each file is plain text, where each line
+ is of the format [type (key=value)*], where type identifies the type of the record.
+ Type maps to the UID of one of the inner classes of this class.
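+
+ For illustration, a job-level record might look like (field values made up):
+   Job JOBID="job_200901010000_0001" JOBNAME="myjob" SUBMIT_TIME="1230000000000"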
+
+ Job history is maintained in a master index which contains start/stop times of all jobs with
+ a few other job level properties. Apart from this, each job's history is maintained in a separate history
+ file. The name of a job history file follows the format jobtrackerId_jobid.
+
+ For parsing the job history it supports a listener based interface where each line is parsed
+ and passed to the listener. The listener can create an object model of the history or look for specific
+ events and discard the rest of the history.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <class name="JobHistory.HistoryCleaner" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobHistory.HistoryCleaner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Cleans up history data.]]>
+ </doc>
+ </method>
+ <doc>
+       <![CDATA[Deletes history files older than one month, updates the master index and removes all
+ jobs older than one month. Also, if a job tracker has had no jobs in the last month,
+ the reference to that job tracker is removed.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <class name="JobHistory.JobInfo" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.JobInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Create a new JobInfo.]]>
+ </doc>
+ </constructor>
+ <method name="getAllTasks" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.Task&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Returns all map and reduce tasks, keyed by task id.]]>
+ </doc>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the path of the locally stored job file
+ @param jobId id of the job
+ @return the path of the job file on the local file system]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFile" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the path of the job-history
+ log file.
+
+ @param logFile path of the job-history file
+ @return URL encoded path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL encoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="decodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to decode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL decoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logSubmitted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="jobConfPath" type="java.lang.String"/>
+ <param name="submitTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Log job submitted event to history. Creates a new file in history
+ for the job. If history file creation fails, it disables history
+ for all other events.
+ @param jobId job id assigned by job tracker.
+ @param jobConf job conf of the job
+ @param jobConfPath path to job conf xml file in HDFS.
+ @param submitTime time when job tracker received the job
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs launch time of job.
+ @param jobId job id, assigned by jobtracker.
+ @param startTime start time of job.
+ @param totalMaps total maps assigned by jobtracker.
+ @param totalReduces total reduces.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <param name="failedMaps" type="int"/>
+ <param name="failedReduces" type="int"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+       <![CDATA[Log job finished. Closes the job file in history.
+ @param jobId job id, assigned by jobtracker.
+ @param finishTime finish time of job in ms.
+ @param finishedMaps no of maps successfully finished.
+ @param finishedReduces no of reduces finished successfully.
+ @param failedMaps no of failed map tasks.
+ @param failedReduces no of failed reduce tasks.
+ @param counters the counters from the job]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs job failed event. Closes the job history log file.
+ @param jobid job id
+ @param timestamp time when job failure was detected in ms.
+ @param finishedMaps no of finished map tasks.
+ @param finishedReduces no of finished reduce tasks.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to job start, finish or failure.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <class name="JobHistory.Keys" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Keys&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Keys[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Keys"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Job history files contain key="value" pairs, where keys belong to this enum.
+ It acts as a global namespace for all keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <!-- start interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <interface name="JobHistory.Listener" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="handle"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recType" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"/>
+ <param name="values" type="java.util.Map&lt;org.apache.hadoop.mapred.JobHistory.Keys, java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Callback method for history parser.
+ @param recType type of record, which is the first entry in the line.
+ @param values a map of key-value pairs as they appear in history.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+       <![CDATA[Callback interface for reading back log events from JobHistory. This interface
+ should be implemented and passed to a parse method such as JobHistory.parseHistoryFromFS().]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
+ <class name="JobHistory.MapAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.MapAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskAttemptId" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of this map task attempt.
+ @param jobId job id
+ @param taskId task id
+ @param taskAttemptId task attempt id
+ @param startTime start time of task attempt as reported by task tracker.
+ @param hostName host name of the task attempt.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskAttemptId" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finish time of map task attempt.
+ @param jobId job id
+ @param taskId task id
+ @param taskAttemptId task attempt id
+ @param finishTime finish time
+ @param hostName host name]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskAttemptId" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt failed event.
+ @param jobId jobid
+ @param taskId taskid
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskAttemptId" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt killed event.
+ @param jobId jobid
+ @param taskId taskid
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+ a Map Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <class name="JobHistory.RecordTypes" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.RecordTypes&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.RecordTypes[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Record types are identifiers for each line of log in history files.
+ A record type appears as the first token in a single line of log.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
+ <class name="JobHistory.ReduceAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.ReduceAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskAttemptId" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of Reduce task attempt.
+ @param jobId job id
+ @param taskId task id (tip)
+ @param taskAttemptId task attempt id
+ @param startTime start time
+ @param hostName host name]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskAttemptId" type="java.lang.String"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finished event of this task.
+ @param jobId job id
+ @param taskId task id
+ @param taskAttemptId task attempt id
+ @param shuffleFinished shuffle finish time
+ @param sortFinished sort finish time
+ @param finishTime finish time of task
+ @param hostName host name where task attempt executed]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskAttemptId" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log failed reduce task attempt.
+ @param jobId job id
+ @param taskId task id
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task failed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskAttemptId" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log killed reduce task attempt.
+ @param jobId job id
+ @param taskId task id
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task failed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+ a Reduce Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Task -->
+ <class name="JobHistory.Task" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.Task"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <doc>
+ <![CDATA[Log start time of task (TIP).
+ @param jobId job id
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param startTime startTime of tip.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finish time of task.
+ @param jobId job id
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param finishTime finish time of task in ms
+ @param counters counters for this task]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+       <![CDATA[Log task failed event.
+ @param jobId jobid
+ @param taskId task id
+ @param taskType MAP or REDUCE.
+ @param time timestamp when the task failure was detected.
+ @param error error message for failure.]]>
+ </doc>
+ </method>
+ <method name="getTaskAttempts" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.TaskAttempt&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Returns all task attempts for this task, keyed by task attempt id.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to Task's start, finish or failure.
+ All events logged by this class are logged in a separate file per job in
+ job tracker history. These events map to TIPs in jobtracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Task -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <class name="JobHistory.TaskAttempt" extends="org.apache.hadoop.mapred.JobHistory.Task"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.TaskAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Base class for Map and Reduce TaskAttempts.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Values -->
+ <class name="JobHistory.Values" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Values&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Values[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Values"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[This enum contains some of the values commonly used by history log events.
+ Since values in history can only be strings, Values.name() is used in
+ most places in the history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Values -->
+ <!-- start class org.apache.hadoop.mapred.JobPriority -->
+ <class name="JobPriority" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobPriority&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobPriority[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Used to describe the priority of the running job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobPriority -->
+ <!-- start class org.apache.hadoop.mapred.JobProfile -->
+ <class name="JobProfile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobProfile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an empty {@link JobProfile}.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+       <![CDATA[Construct a {@link JobProfile} from the userid, jobid,
+ job config-file, job-details url and job name.
+
+ @param user userid of the person who submitted the job.
+ @param jobid id of the job.
+ @param jobFile job configuration file.
+ @param url link to the web-ui for details of the job.
+ @param name user-specified job name.]]>
+ </doc>
+ </constructor>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user id.]]>
+ </doc>
+ </method>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job id.]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configuration file for the job.]]>
+ </doc>
+ </method>
+ <method name="getURL" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the link to the web-ui for details of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A JobProfile is a MapReduce primitive that tracks a job,
+ whether living or dead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobProfile -->
+ <!-- start class org.apache.hadoop.mapred.JobShell -->
+ <class name="JobShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run method from Tool]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provides command line parsing for job submission.
+ A job submission looks like:
+ hadoop jar -libjars <comma separated jars> -archives <comma separated archives>
+ -files <comma separated files> inputjar args
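+
+ For example (the jar and file names below are placeholders):
+ hadoop jar -libjars lib1.jar,lib2.jar -files data.txt app.jar arg1 arg2]]>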
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobShell -->
+ <!-- start class org.apache.hadoop.mapred.JobStatus -->
+ <class name="JobStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobStatus" type="java.lang.String, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job]]>
+ </doc>
+ </constructor>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The jobid of the Job]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in maps]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in reduce]]>
+ </doc>
+ </method>
+ <method name="getRunState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return running state of the job]]>
+ </doc>
+ </method>
+ <method name="setRunState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Change the current run state of the job.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return start time of the job]]>
+ </doc>
+ </method>
+ <method name="getUsername" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the username of the job]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SUCCEEDED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PREP" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Describes the current status of a job. This is
+ not intended to be a comprehensive piece of data.
+ For that, look at JobProfile.
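+
+ <p>A minimal sketch of rendering the run state (the states are the
+ public int fields of this class):</p>
+ <p><blockquote><pre>
+ static String describe(JobStatus status) {
+   switch (status.getRunState()) {
+     case JobStatus.PREP:      return "setting up";
+     case JobStatus.RUNNING:   return "running";
+     case JobStatus.SUCCEEDED: return "succeeded";
+     case JobStatus.FAILED:    return "failed";
+     default:                  return "unknown";
+   }
+ }
+ </pre></blockquote></p>]]>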
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobStatus -->
+ <!-- start interface org.apache.hadoop.mapred.JobSubmissionProtocol -->
+ <interface name="JobSubmissionProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="getNewJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Allocate a name for the job.
+ @return a unique job name for submitting jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a Job for execution. Returns the latest profile for
+ that job.
+ The job files should be submitted in <b>system-dir</b>/<b>jobName</b>.]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the current status of the cluster
+ @return summary of the state of the cluster]]>
+ </doc>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill the indicated job]]>
+ </doc>
+ </method>
+ <method name="killTask" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill indicated task attempt.
+ @param taskId the id of the task to kill.
+ @param shouldFail if true the task is failed and added to the failed tasks list, otherwise
+ it is just killed, without affecting the job's failure status.]]>
+ </doc>
+ </method>
+ <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab a handle to a job that is already known to the JobTracker.
+ @return Profile of the job, or null if not found.]]>
+ </doc>
+ </method>
+ <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab a handle to a job that is already known to the JobTracker.
+ @return Status of the job, or null if not found.]]>
+ </doc>
+ </method>
+ <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab the current job counters]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab a bunch of info on the map tasks that make up the job]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab a bunch of info on the reduce tasks that make up the job]]>
+ </doc>
+ </method>
+ <method name="getFilesystemName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A MapReduce system always operates on a single filesystem. This
+ function returns the fs name. ('local' if the localfs; 'addr:port'
+ if dfs). The client can then copy files into the right locations
+ prior to submitting the job.]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are not completed and not failed
+ @return array of JobStatus for the running/to-be-run
+ jobs.]]>
+ </doc>
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get all the jobs submitted.
+ @return array of JobStatus for the submitted jobs]]>
+ </doc>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxEvents" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get task completion events for the jobid, starting from fromEventId.
+ Returns an empty array if no events are available.
+ @param jobid job id
+ @param fromEventId event id to start from.
+ @param maxEvents the max number of events we want to look at
+ @return array of task completion events.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="tipId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the diagnostics for a given task in a given job
+ @param jobId the id of the job
+ @param tipId the id of the tip
+ @param taskId the id of the task
+ @return an array of the diagnostic messages]]>
+ </doc>
+ </method>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Protocol that a JobClient and the central JobTracker use to communicate. The
+ JobClient can use these methods to submit a Job for execution, and learn about
+ the current system status.
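+
+ <p>A hedged sketch of the client-side flow (normally hidden behind a
+ JobClient); <code>proxy</code> is assumed to be an RPC proxy for this
+ interface:</p>
+ <p><blockquote><pre>
+ String jobId = proxy.getNewJobId();
+ // ... place the job files under &lt;system-dir&gt;/&lt;jobId&gt; ...
+ JobStatus status = proxy.submitJob(jobId);
+ while (status.getRunState() == JobStatus.RUNNING) {
+   Thread.sleep(1000);                 // throws InterruptedException
+   status = proxy.getJobStatus(jobId);
+ }
+ </pre></blockquote></p>]]>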
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobSubmissionProtocol -->
+ <!-- start class org.apache.hadoop.mapred.JobTracker -->
+ <class name="JobTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.InterTrackerProtocol"/>
+ <implements name="org.apache.hadoop.mapred.JobSubmissionProtocol"/>
+ <method name="startTracker" return="org.apache.hadoop.mapred.JobTracker"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker with given configuration.
+
+ The conf will be modified to reflect the actual ports on which
+ the JobTracker is up and running if the user passes the port as
+ <code>zero</code>.
+
+ @param conf configuration for the JobTracker.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Run forever]]>
+ </doc>
+ </method>
+ <method name="getTotalSubmissions" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobTrackerMachine" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTrackerIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the unique identifier (i.e. timestamp) of this job tracker's start.
+ @return a string with a unique identifier]]>
+ </doc>
+ </method>
+ <method name="getTrackerPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="runningJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRunningJobs" return="java.util.List&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version that is called from a timer thread, and therefore needs to be
+ careful to synchronize.]]>
+ </doc>
+ </method>
+ <method name="failedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="completedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="taskTrackers" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskTracker" return="org.apache.hadoop.mapred.TaskTrackerStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trackerID" type="java.lang.String"/>
+ </method>
+ <method name="resolveAndAddToTopology" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getNodesAtMaxLevel" return="java.util.Collection&lt;org.apache.hadoop.net.Node&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a collection of nodes at the max level]]>
+ </doc>
+ </method>
+ <method name="getParentNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <param name="level" type="int"/>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the Node in the network topology that corresponds to the hostname]]>
+ </doc>
+ </method>
+ <method name="getNumTaskCacheLevels" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumResolvedTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="heartbeat" return="org.apache.hadoop.mapred.HeartbeatResponse"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskTrackerStatus"/>
+ <param name="initialContact" type="boolean"/>
+ <param name="acceptNewTasks" type="boolean"/>
+ <param name="responseId" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The periodic heartbeat mechanism between the {@link TaskTracker} and
+ the {@link JobTracker}.
+
+ The {@link JobTracker} processes the status information sent by the
+ {@link TaskTracker} and responds with instructions to start/stop
+ tasks or jobs, and also 'reset' instructions during contingencies.
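+
+ <p>A hedged sketch of the tracker-side loop; <code>jobTracker</code> is
+ assumed to be an RPC proxy for the inter-tracker protocol, and building a
+ real TaskTrackerStatus is elided:</p>
+ <p><blockquote><pre>
+ short responseId = 0;
+ boolean initialContact = true;
+ while (true) {
+   TaskTrackerStatus status = new TaskTrackerStatus(); // placeholder status
+   HeartbeatResponse response =
+       jobTracker.heartbeat(status, initialContact, true, responseId);
+   responseId = response.getResponseId();
+   initialContact = false;
+   // act on the returned instructions, then wait for the next interval
+   Thread.sleep(10 * 1000);            // illustrative interval
+ }
+ </pre></blockquote></p>]]>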
+ </doc>
+ </method>
+ <method name="getFilesystemName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab the local fs name]]>
+ </doc>
+ </method>
+ <method name="reportTaskTrackerError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTracker" type="java.lang.String"/>
+ <param name="errorClass" type="java.lang.String"/>
+ <param name="errorMessage" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNewJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Allocates a new JobId string.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[JobTracker.submitJob() kicks off a new job.
+
+ Create a 'JobInProgress' object, which contains both JobProfile
+ and JobStatus. Those two sub-objects are sometimes shipped outside
+ of the JobTracker. But JobInProgress adds info that's useful for
+ the JobTracker alone.
+
+ We add the JIP to the jobInitQueue, which is processed
+ asynchronously to handle split-computation and build up
+ the right TaskTracker/Block mapping.
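+
+ <p>The init-queue pattern described above, sketched with
+ java.util.concurrent types for illustration (the names are assumptions,
+ not the JobTracker's actual fields):</p>
+ <p><blockquote><pre>
+ final BlockingQueue&lt;JobInProgress&gt; jobInitQueue =
+     new LinkedBlockingQueue&lt;JobInProgress&gt;();
+
+ // submitJob() enqueues the new JobInProgress:
+ //   jobInitQueue.put(jip);
+
+ // A background thread initializes jobs one at a time:
+ Thread initThread = new Thread(new Runnable() {
+   public void run() {
+     try {
+       while (true) {
+         JobInProgress jip = jobInitQueue.take();
+         // compute splits, build the TaskTracker/Block mapping ...
+       }
+     } catch (InterruptedException ie) {
+       // shutting down
+     }
+   }
+ });
+ initThread.start();
+ </pre></blockquote></p>]]>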
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxEvents" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="tipId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the diagnostics for a given task
+ @param jobId the id of the job
+ @param tipId the id of the tip
+ @param taskId the id of the task
+ @return an array of the diagnostic messages]]>
+ </doc>
+ </method>
+ <method name="killTask" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a Task to be killed]]>
+ </doc>
+ </method>
+ <method name="getAssignedTracker" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get tracker name for a given task id.
+ @param taskId the name of the task
+ @return The name of the task tracker]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.JobInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the localized job file path on the job tracker's local file system
+ @param jobId id of the job
+ @return the path of the job conf file on the local file system]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker process. This is used only for debugging. As a rule,
+ JobTracker should be run as part of the DFS Namenode process.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[JobTracker is the central location for submitting and
+ tracking MR jobs in a network environment.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker -->
+ <!-- start class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <class name="JobTracker.IllegalStateException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobTracker.IllegalStateException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A client tried to submit a job before the Job Tracker was ready.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <!-- start class org.apache.hadoop.mapred.JobTracker.State -->
+ <class name="JobTracker.State" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobTracker.State&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobTracker.State[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.State -->
+ <!-- start class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+ <class name="KeyValueLineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="findSeparator" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="sep" type="byte"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read key/value pair in a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class treats a line in the input as a key/value pair separated by a
+ separator character. The separator can be specified in the config file
+ under the attribute name key.value.separator.in.input.line. The default
+ separator is the tab character ('\t').]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
+ <class name="KeyValueTextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="KeyValueTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return is used to signal the end of a line. Each line
+ is divided into key and value parts by a separator byte. If no such byte
+ exists, the key will be the entire line and the value will be empty.
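+
+ <p>A minimal configuration sketch (the job class and the separator are
+ placeholders):</p>
+ <p><blockquote><pre>
+ JobConf conf = new JobConf(MyJob.class);
+ conf.setInputFormat(KeyValueTextInputFormat.class);
+ // use ',' instead of the default tab separator:
+ conf.set("key.value.separator.in.input.line", ",");
+ </pre></blockquote></p>]]>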
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader -->
+ <class name="LineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="LineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.LongWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.LongWritable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the progress within the split]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Treats keys as offsets in the file and values as lines.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
+ <class name="LineRecordReader.LineReader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LineRecordReader.LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ <code>io.file.buffer.size</code> specified in the given
+ <code>Configuration</code>.
+ @param in input stream
+ @param conf configuration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the underlying stream.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides a line reader from an input stream.
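+
+ <p>A minimal sketch, assuming <code>in</code> is an already-opened
+ InputStream:</p>
+ <p><blockquote><pre>
+ LineRecordReader.LineReader reader =
+     new LineRecordReader.LineReader(in, new Configuration());
+ Text line = new Text();
+ try {
+   while (reader.readLine(line) > 0) {
+     System.out.println(line);
+   }
+ } finally {
+   reader.close();
+ }
+ </pre></blockquote></p>]]>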
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
+ <!-- start class org.apache.hadoop.mapred.MapFileOutputFormat -->
+ <class name="MapFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.MapFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getEntry" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readers" type="org.apache.hadoop.io.MapFile.Reader[]"/>
+ <param name="partitioner" type="org.apache.hadoop.mapred.Partitioner&lt;K, V&gt;"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get an entry from output generated by this class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link MapFile}s.
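+
+ <p>A hedged lookup sketch using {@link #getReaders} and {@link #getEntry};
+ the output directory, the key and the use of HashPartitioner (which assumes
+ the job used the default partitioner) are illustrative assumptions:</p>
+ <p><blockquote><pre>
+ Configuration conf = new Configuration();
+ Path outDir = new Path("out");            // the job's output directory
+ MapFile.Reader[] readers =
+     MapFileOutputFormat.getReaders(FileSystem.get(conf), outDir, conf);
+ Partitioner&lt;Text, Text&gt; partitioner = new HashPartitioner&lt;Text, Text&gt;();
+ Text key = new Text("some-key");
+ Text value = new Text();
+ Writable entry = MapFileOutputFormat.getEntry(readers, partitioner, key, value);
+ </pre></blockquote></p>]]>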
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapFileOutputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.Mapper -->
+ <interface name="Mapper" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1"/>
+ <param name="value" type="V1"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Maps a single input key/value pair into an intermediate key/value pair.
+
+ <p>Output pairs need not be of the same types as input pairs. A given
+ input pair may map to zero or many output pairs. Output pairs are
+ collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes an insignificant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has
+ timed-out and kill that task. The other way of avoiding this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the input key.
+ @param value the input value.
+ @param output collects mapped keys and values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Maps input key/value pairs to a set of intermediate key/value pairs.
+
+ <p>Maps are the individual tasks which transform input records into
+ intermediate records. The transformed intermediate records need not be of
+ the same type as the input records. A given input pair may map to zero or
+ many output pairs.</p>
+
+ <p>The Hadoop Map-Reduce framework spawns one map task for each
+ {@link InputSplit} generated by the {@link InputFormat} for the job.
+ <code>Mapper</code> implementations can access the {@link JobConf} for the
+ job via the {@link JobConfigurable#configure(JobConf)} and initialize
+ themselves. Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p>The framework then calls
+ {@link #map(Object, Object, OutputCollector, Reporter)}
+ for each key/value pair in the <code>InputSplit</code> for that task.</p>
+
+ <p>All intermediate values associated with a given output key are
+ subsequently grouped by the framework, and passed to a {@link Reducer} to
+ determine the final output. Users can control the grouping by specifying
+ a <code>Comparator</code> via
+ {@link JobConf#setOutputKeyComparatorClass(Class)}.</p>
+
+ <p>The grouped <code>Mapper</code> outputs are partitioned per
+ <code>Reducer</code>. Users can control which keys (and hence records) go to
+ which <code>Reducer</code> by implementing a custom {@link Partitioner}.</p>
+
+ <p>Users can optionally specify a <code>combiner</code>, via
+ {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the
+ intermediate outputs, which helps to cut down the amount of data transferred
+ from the <code>Mapper</code> to the <code>Reducer</code>.</p>
+
+ <p>The intermediate, grouped outputs are always stored in
+ {@link SequenceFile}s. Applications can specify if and how the intermediate
+ outputs are to be compressed and which {@link CompressionCodec}s are to be
+ used via the <code>JobConf</code>.</p>
+
+ <p>If the job has
+ <a href="{@docRoot}/org/apache/hadoop/mapred/JobConf.html#ReducerNone">zero
+ reduces</a> then the output of the <code>Mapper</code> is directly written
+ to the {@link FileSystem} without grouping by keys.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyMapper&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Mapper&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String mapTaskId;
+ private String inputFile;
+ private int noRecords = 0;
+
+ public void configure(JobConf job) {
+ mapTaskId = job.get("mapred.task.id");
+ inputFile = job.get("mapred.input.file");
+ }
+
+ public void map(K key, V val,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ // reporter.progress();
+
+ // Process some more
+ // ...
+ // ...
+
+ // Increment the no. of &lt;key, value&gt; pairs processed
+ ++noRecords;
+
+ // Increment counters
+ reporter.incrCounter(NUM_RECORDS, 1);
+
+ // Every 100 records update application-level status
+ if ((noRecords%100) == 0) {
+ reporter.setStatus(mapTaskId + " processed " + noRecords +
+ " from input-file: " + inputFile);
+ }
+
+ // Output the result
+ output.collect(key, val);
+ }
+ }
+ </pre></blockquote></p>
+
+ <p>Applications may write a custom {@link MapRunnable} to exert greater
+ control on map processing e.g. multi-threaded <code>Mapper</code>s etc.</p>
+
+ @see JobConf
+ @see InputFormat
+ @see Partitioner
+ @see Reducer
+ @see MapReduceBase
+ @see MapRunnable
+ @see SequenceFile]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Mapper -->
+ <!-- start class org.apache.hadoop.mapred.MapReduceBase -->
+ <class name="MapReduceBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="MapReduceBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for {@link Mapper} and {@link Reducer} implementations.
+
+ <p>Provides default no-op implementations for a few methods; most non-trivial
+ applications need to override some of them.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapReduceBase -->
+ <!-- start interface org.apache.hadoop.mapred.MapRunnable -->
+ <interface name="MapRunnable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start mapping input <tt>&lt;key, value&gt;</tt> pairs.
+
+ <p>Mapping of input records to output records is complete when this method
+ returns.</p>
+
+ @param input the {@link RecordReader} to read the input records.
+ @param output the {@link OutputCollector} to collect the output records.
+ @param reporter {@link Reporter} to report progress, status-updates etc.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Expert: Generic interface for {@link Mapper}s.
+
+ <p>Custom implementations of <code>MapRunnable</code> can exert greater
+ control on map processing e.g. multi-threaded, asynchronous mappers etc.</p>
+
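+ <p>A sketch of what a custom implementation looks like; this one simply
+ reproduces the default single-threaded behaviour:</p>
+ <p><blockquote><pre>
+ public class MyMapRunner&lt;K1, V1, K2, V2&gt;
+     implements MapRunnable&lt;K1, V1, K2, V2&gt; {
+   private Mapper&lt;K1, V1, K2, V2&gt; mapper;
+
+   public void configure(JobConf job) {
+     mapper = (Mapper&lt;K1, V1, K2, V2&gt;)
+         ReflectionUtils.newInstance(job.getMapperClass(), job);
+   }
+
+   public void run(RecordReader&lt;K1, V1&gt; input,
+                   OutputCollector&lt;K2, V2&gt; output,
+                   Reporter reporter) throws IOException {
+     K1 key = input.createKey();
+     V1 value = input.createValue();
+     while (input.next(key, value)) {
+       mapper.map(key, value, output, reporter);
+     }
+     mapper.close();
+   }
+ }
+ </pre></blockquote></p>
+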
+ @see Mapper]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.MapRunnable -->
+ <!-- start class org.apache.hadoop.mapred.MapRunner -->
+ <class name="MapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Default {@link MapRunnable} implementation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapRunner -->
+ <!-- start class org.apache.hadoop.mapred.MapTaskStatus -->
+ <class name="MapTaskStatus" extends="org.apache.hadoop.mapred.TaskStatus"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapTaskStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="MapTaskStatus" type="java.lang.String, float, org.apache.hadoop.mapred.TaskStatus.State, java.lang.String, java.lang.String, java.lang.String, org.apache.hadoop.mapred.TaskStatus.Phase, org.apache.hadoop.mapred.Counters"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getIsMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getShuffleFinishTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSortFinishTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapTaskStatus -->
+ <!-- start class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <class name="MultiFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultiFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An abstract {@link InputFormat} that returns {@link MultiFileSplit}'s
+ from its {@link #getSplits(JobConf, int)} method. Splits are constructed from
+ the files under the input paths. Each split returned contains <i>nearly</i>
+ equal content length. <br>
+ Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)}
+ to construct <code>RecordReader</code>'s for <code>MultiFileSplit</code>'s.
+ @see MultiFileSplit]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.MultiFileSplit -->
+ <class name="MultiFileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="MultiFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLengths" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array containing the lengths of the files in
+ the split]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the length of the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getNumPaths" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all the Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+    <![CDATA[A sub-collection of input files. Unlike {@link FileSplit}, the
+ MultiFileSplit class does not represent a split of a single file, but a split
+ of the input files into smaller sets. The atomic unit of the split is a file. <br>
+ MultiFileSplit can be used to implement {@link RecordReader}'s that
+ read one record per file.
+ @see FileSplit
+ @see MultiFileInputFormat]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileSplit -->
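+ <!-- Illustrative sketch (an assumption, not part of the generated API record
+ above): a minimal concrete MultiFileInputFormat in the spirit of the two class
+ descriptions above, emitting one (path, length) record per file in a
+ MultiFileSplit. The class name FileNameInputFormat is hypothetical; the calls
+ used are the ones documented in this file.
+
+ import java.io.IOException;
+ import org.apache.hadoop.io.LongWritable;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.*;
+
+ public class FileNameInputFormat extends MultiFileInputFormat<Text, LongWritable> {
+   public RecordReader<Text, LongWritable> getRecordReader(
+       InputSplit split, JobConf job, Reporter reporter) throws IOException {
+     final MultiFileSplit ms = (MultiFileSplit) split;
+     return new RecordReader<Text, LongWritable>() {
+       private int i = 0;
+       public boolean next(Text key, LongWritable value) {
+         if (i >= ms.getNumPaths()) return false;
+         key.set(ms.getPath(i).toString());   // key: the i-th path
+         value.set(ms.getLength(i));          // value: its length in bytes
+         i++;
+         return true;
+       }
+       public Text createKey() { return new Text(); }
+       public LongWritable createValue() { return new LongWritable(); }
+       public long getPos() { return i; }
+       public float getProgress() {
+         return ms.getNumPaths() == 0 ? 1.0f : (float) i / ms.getNumPaths();
+       }
+       public void close() { }
+     };
+   }
+ }
+ -->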
+ <!-- start interface org.apache.hadoop.mapred.OutputCollector -->
+ <interface name="OutputCollector" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="collect"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Adds a key/value pair to the output.
+
+ @param key the key to collect.
+ @param value the value to collect.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Collects the <code>&lt;key, value&gt;</code> pairs output by {@link Mapper}s
+ and {@link Reducer}s.
+
+ <p><code>OutputCollector</code> is the generalization of the facility
+ provided by the Map-Reduce framework to collect data output by either the
+ <code>Mapper</code> or the <code>Reducer</code> i.e. intermediate outputs
+ or the output of the job.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputCollector -->
+ <!-- start interface org.apache.hadoop.mapred.OutputFormat -->
+ <interface name="OutputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordWriter} for the given job.
+
+ @param ignored
+ @param job configuration for the job whose output is being written.
+ @param name the unique name for this part of the output.
+ @param progress mechanism for reporting progress while writing to file.
+ @return a {@link RecordWriter} to write the output for the job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check for validity of the output-specification for the job.
+
+ <p>This is to validate the output specification for the job when the job
+ is submitted. Typically this checks that the output does not already exist,
+ throwing an exception when it already exists, so that output is not
+ overwritten.</p>
+
+ @param ignored
+ @param job job configuration.
+ @throws IOException when output should not be attempted]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputFormat</code> describes the output-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the output-specification of the job, e.g. check that the
+ output directory doesn't already exist.
+ </li>
+ <li>
+ Provide the {@link RecordWriter} implementation to be used to write out
+ the output files of the job. Output files are stored in a
+ {@link FileSystem}.
+ </li>
+ </ol>
+
+ @see RecordWriter
+ @see JobConf]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputFormat -->
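+ <!-- Illustrative sketch (an assumption, not part of the API record above): a
+ minimal OutputFormat writing "key<TAB>value" lines, showing both the
+ RecordWriter contract and the fail-fast checkOutputSpecs idiom described
+ above. The class name TabSeparatedOutputFormat is hypothetical.
+
+ import java.io.DataOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.mapred.*;
+ import org.apache.hadoop.util.Progressable;
+
+ public class TabSeparatedOutputFormat<K, V> implements OutputFormat<K, V> {
+   public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job,
+       String name, Progressable progress) throws IOException {
+     Path file = new Path(FileOutputFormat.getOutputPath(job), name);
+     final DataOutputStream out = file.getFileSystem(job).create(file, progress);
+     return new RecordWriter<K, V>() {
+       public void write(K key, V value) throws IOException {
+         out.writeBytes(key + "\t" + value + "\n");
+       }
+       public void close(Reporter reporter) throws IOException {
+         out.close();
+       }
+     };
+   }
+
+   public void checkOutputSpecs(FileSystem ignored, JobConf job)
+       throws IOException {
+     Path outDir = FileOutputFormat.getOutputPath(job);
+     // Refuse to run rather than silently overwrite existing output.
+     if (outDir != null && outDir.getFileSystem(job).exists(outDir)) {
+       throw new IOException("output directory " + outDir + " already exists");
+     }
+   }
+ }
+ -->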
+ <!-- start class org.apache.hadoop.mapred.OutputFormatBase -->
+ <class name="OutputFormatBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat}">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="OutputFormatBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for {@link OutputFormat}.
+ @deprecated Use {@link FileOutputFormat}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputFormatBase -->
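+ <!-- Illustrative sketch (an assumption): configuring compressed job output
+ with the static helpers above. Since OutputFormatBase is marked deprecated in
+ favour of FileOutputFormat, the sketch goes through FileOutputFormat, which
+ carries the same setCompressOutput/setOutputCompressorClass helpers.
+
+ import org.apache.hadoop.io.compress.GzipCodec;
+ import org.apache.hadoop.mapred.FileOutputFormat;
+ import org.apache.hadoop.mapred.JobConf;
+
+ public class CompressedOutputExample {
+   public static void main(String[] args) {
+     JobConf job = new JobConf();
+     // Turn compression on and pick gzip as the codec.
+     FileOutputFormat.setCompressOutput(job, true);
+     FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
+   }
+ }
+ -->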
+ <!-- start class org.apache.hadoop.mapred.OutputLogFilter -->
+ <class name="OutputLogFilter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.PathFilter"/>
+ <constructor name="OutputLogFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <doc>
+    <![CDATA[This class filters log files out of a given directory listing;
+ it doesn't accept paths containing <code>_logs</code>.
+ It can be used to list the paths of an output directory as follows:
+ Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
+ new OutputLogFilter()));]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputLogFilter -->
+ <!-- start interface org.apache.hadoop.mapred.Partitioner -->
+ <interface name="Partitioner" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numPartitions" type="int"/>
+ <doc>
+    <![CDATA[Get the partition number for a given key (hence record) given the total
+ number of partitions i.e. number of reduce-tasks for the job.
+
+ <p>Typically a hash function on all or a subset of the key.</p>
+
+ @param key the key to be partitioned.
+ @param value the entry value.
+ @param numPartitions the total number of partitions.
+ @return the partition number for the <code>key</code>.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partitions the key space.
+
+ <p><code>Partitioner</code> controls the partitioning of the keys of the
+ intermediate map-outputs. The key (or a subset of the key) is used to derive
+ the partition, typically by a hash function. The total number of partitions
+ is the same as the number of reduce tasks for the job. Hence this controls
+ which of the <code>m</code> reduce tasks the intermediate key (and hence the
+ record) is sent for reduction.</p>
+
+ @see Reducer]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Partitioner -->
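+ <!-- Illustrative sketch (an assumption): a hash-based Partitioner of the kind
+ the description above calls typical; it mirrors what Hadoop's own
+ HashPartitioner does. The class name ModuloPartitioner is hypothetical.
+
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.Partitioner;
+
+ public class ModuloPartitioner implements Partitioner<Text, Text> {
+   public void configure(JobConf job) { }  // from JobConfigurable; nothing to do
+
+   public int getPartition(Text key, Text value, int numPartitions) {
+     // Mask the sign bit so the index is always non-negative.
+     return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
+   }
+ }
+ -->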
+ <!-- start interface org.apache.hadoop.mapred.RecordReader -->
+ <interface name="RecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the next key/value pair from the input for processing.
+
+ @param key the key to read data into
+ @param value the value to read data into
+ @return true iff a key/value was read, false if at EOF]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a key.
+
+ @return a new key object.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a value.
+
+ @return a new value object.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current position in the input.
+
+ @return the current position in the input.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this {@link InputSplit} to future operations.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+    <![CDATA[How much of the input has the {@link RecordReader} consumed,
+ i.e. how much of the input has been processed so far?
+
+ @return progress from <code>0.0</code> to <code>1.0</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordReader</code> reads &lt;key, value&gt; pairs from an
+ {@link InputSplit}.
+
+ <p><code>RecordReader</code>, typically, converts the byte-oriented view of
+ the input, provided by the <code>InputSplit</code>, and presents a
+ record-oriented view for the {@link Mapper} & {@link Reducer} tasks for
+ processing. It thus assumes the responsibility of processing record
+ boundaries and presenting the tasks with keys and values.</p>
+
+ @see InputSplit
+ @see InputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordReader -->
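+ <!-- Illustrative sketch (an assumption): how a caller drives the RecordReader
+ contract above, in the way MapRunner drives the map phase. The key and value
+ objects come from createKey()/createValue() and are reused across next()
+ calls, so they must be copied if they are retained.
+
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.RecordReader;
+
+ public class RecordReaderDriver {
+   public static <K, V> long countRecords(RecordReader<K, V> reader)
+       throws IOException {
+     K key = reader.createKey();
+     V value = reader.createValue();
+     long n = 0;
+     while (reader.next(key, value)) {
+       // key/value hold the current record only until the next call to next().
+       n++;
+     }
+     reader.close();
+     return n;
+   }
+ }
+ -->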
+ <!-- start interface org.apache.hadoop.mapred.RecordWriter -->
+ <interface name="RecordWriter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes a key/value pair.
+
+ @param key the key to write.
+ @param value the value to write.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this <code>RecordWriter</code> to future operations.
+
+ @param reporter facility to report progress.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordWriter</code> writes the output &lt;key, value&gt; pairs
+ to an output file.
+
+ <p><code>RecordWriter</code> implementations write the job outputs to the
+ {@link FileSystem}.
+
+ @see OutputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordWriter -->
+ <!-- start interface org.apache.hadoop.mapred.Reducer -->
+ <interface name="Reducer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="values" type="java.util.Iterator&lt;V2&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K3, V3&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<i>Reduces</i> values for a given key.
+
+ <p>The framework calls this method for each
+ <code>&lt;key, (list of values)></code> pair in the grouped inputs.
+ Output values must be of the same type as input values. Input keys must
+ not be altered. Typically all values are combined into zero or one value.
+ </p>
+
+ <p>Output pairs are collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes an insignificant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has
+ timed-out and kill that task. The other way of avoiding this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the key.
+ @param values the list of values to reduce.
+ @param output to collect keys and combined values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reduces a set of intermediate values which share a key to a smaller set of
+ values.
+
+ <p>The number of <code>Reducer</code>s for the job is set by the user via
+ {@link JobConf#setNumReduceTasks(int)}. <code>Reducer</code> implementations
+ can access the {@link JobConf} for the job via the
+ {@link JobConfigurable#configure(JobConf)} method and initialize themselves.
+ Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p><code>Reducer</code> has 3 primary phases:</p>
+ <ol>
+ <li>
+
+ <h4 id="Shuffle">Shuffle</h4>
+
+ <p><code>Reducer</code> is input the grouped output of a {@link Mapper}.
+ In this phase the framework, for each <code>Reducer</code>, fetches the
+ relevant partition of the output of all the <code>Mapper</code>s, via HTTP.
+ </p>
+ </li>
+
+ <li>
+ <h4 id="Sort">Sort</h4>
+
+ <p>The framework groups <code>Reducer</code> inputs by <code>key</code>s
+ (since different <code>Mapper</code>s may have output the same key) in this
+ stage.</p>
+
+ <p>The shuffle and sort phases occur simultaneously i.e. while outputs are
+ being fetched they are merged.</p>
+
+ <h5 id="SecondarySort">SecondarySort</h5>
+
+ <p>If equivalence rules for keys while grouping the intermediates are
+ different from those for grouping keys before reduction, then one may
+ specify a <code>Comparator</code> via
+ {@link JobConf#setOutputValueGroupingComparator(Class)}. Since
+ {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to
+ control how intermediate keys are grouped, these can be used in conjunction
+ to simulate <i>secondary sort on values</i>.</p>
+
+
+ For example, say that you want to find duplicate web pages and tag them
+ all with the url of the "best" known example. You would set up the job
+ like:
+ <ul>
+ <li>Map Input Key: url</li>
+ <li>Map Input Value: document</li>
+ <li>Map Output Key: document checksum, url pagerank</li>
+ <li>Map Output Value: url</li>
+ <li>Partitioner: by checksum</li>
+ <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
+ <li>OutputValueGroupingComparator: by checksum</li>
+ </ul>
+ </li>
+
+ <li>
+ <h4 id="Reduce">Reduce</h4>
+
+ <p>In this phase the
+ {@link #reduce(Object, Iterator, OutputCollector, Reporter)}
+ method is called for each <code>&lt;key, (list of values)></code> pair in
+ the grouped inputs.</p>
+ <p>The output of the reduce task is typically written to the
+ {@link FileSystem} via
+ {@link OutputCollector#collect(Object, Object)}.</p>
+ </li>
+ </ol>
+
+ <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyReducer&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Reducer&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String reduceTaskId;
+ private int noKeys = 0;
+
+ public void configure(JobConf job) {
+ reduceTaskId = job.get("mapred.task.id");
+ }
+
+ public void reduce(K key, Iterator&lt;V&gt; values,
+ OutputCollector&lt;K, V&gt; output,
+ Reporter reporter)
+ throws IOException {
+
+ // Process
+ int noValues = 0;
+ while (values.hasNext()) {
+ V value = values.next();
+
+ // Increment the no. of values for this key
+ ++noValues;
+
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ if ((noValues%10) == 0) {
+ reporter.progress();
+ }
+
+ // Process some more
+ // ...
+ // ...
+
+ // Output the &lt;key, value&gt;
+ output.collect(key, value);
+ }
+
+ // Increment the no. of &lt;key, list of values&gt; pairs processed
+ ++noKeys;
+
+ // Increment counters
+ reporter.incrCounter(MyCounters.NUM_RECORDS, 1);
+
+ // Every 100 keys update application-level status
+ if ((noKeys%100) == 0) {
+ reporter.setStatus(reduceTaskId + " processed " + noKeys);
+ }
+ }
+ }
+ </pre></blockquote></p>
+
+ @see Mapper
+ @see Partitioner
+ @see Reporter
+ @see MapReduceBase]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reducer -->
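+ <!-- Illustrative sketch (an assumption): wiring up the secondary-sort recipe
+ from the description above. The three partitioner/comparator classes named
+ here are hypothetical user-defined implementations (a Partitioner and two
+ RawComparators); only the JobConf hooks are the documented ones.
+
+ import org.apache.hadoop.mapred.JobConf;
+
+ public class SecondarySortSetup {
+   public static void configure(JobConf job) {
+     // Partition and group on the checksum alone, but sort by checksum and
+     // then by decreasing pagerank, as in the duplicate-pages example above.
+     job.setPartitionerClass(ChecksumPartitioner.class);                 // hypothetical
+     job.setOutputKeyComparatorClass(ChecksumThenRankComparator.class);  // hypothetical
+     job.setOutputValueGroupingComparator(ChecksumComparator.class);     // hypothetical
+   }
+ }
+ -->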
+ <!-- start class org.apache.hadoop.mapred.ReduceTaskStatus -->
+ <class name="ReduceTaskStatus" extends="org.apache.hadoop.mapred.TaskStatus"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReduceTaskStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ReduceTaskStatus" type="java.lang.String, float, org.apache.hadoop.mapred.TaskStatus.State, java.lang.String, java.lang.String, java.lang.String, org.apache.hadoop.mapred.TaskStatus.Phase, org.apache.hadoop.mapred.Counters"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="clone" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getIsMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getShuffleFinishTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSortFinishTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFetchFailedMaps" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ReduceTaskStatus -->
+ <!-- start interface org.apache.hadoop.mapred.Reporter -->
+ <interface name="Reporter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Progressable"/>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the status description for the task.
+
+ @param status brief description of the current status.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the key, which can be of
+ any {@link Enum} type, by the specified amount.
+
+ @param key key to identify the counter to be incremented. The key can
+ be any <code>Enum</code>.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="getInputSplit" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+ <doc>
+ <![CDATA[Get the {@link InputSplit} object for a map.
+
+ @return the <code>InputSplit</code> that the map is reading from.
+ @throws UnsupportedOperationException if called outside a mapper]]>
+ </doc>
+ </method>
+ <field name="NULL" type="org.apache.hadoop.mapred.Reporter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A constant of Reporter type that does nothing.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A facility for Map-Reduce applications to report progress and update
+ counters, status information etc.
+
+ <p>{@link Mapper} and {@link Reducer} can use the <code>Reporter</code>
+ provided to report progress or just indicate that they are alive. In
+ scenarios where the application takes an insignificant amount of time to
+ process individual key/value pairs, this is crucial since the framework
+ might assume that the task has timed-out and kill that task.
+
+ <p>Applications can also update {@link Counters} via the provided
+ <code>Reporter</code>.</p>
+
+ @see Progressable
+ @see Counters]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reporter -->
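+ <!-- Illustrative sketch (an assumption): a mapper using the Reporter facility
+ described above: per-task status, a counter, and getInputSplit(), which is
+ only valid inside a map task. SplitAwareMapper and the RECORDS counter are
+ hypothetical names.
+
+ import java.io.IOException;
+ import org.apache.hadoop.io.LongWritable;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.*;
+
+ public class SplitAwareMapper extends MapReduceBase
+     implements Mapper<LongWritable, Text, Text, LongWritable> {
+   static enum MyCounters { RECORDS }
+
+   public void map(LongWritable key, Text value,
+       OutputCollector<Text, LongWritable> output, Reporter reporter)
+       throws IOException {
+     // Throws UnsupportedOperationException if called outside a mapper.
+     InputSplit split = reporter.getInputSplit();
+     reporter.setStatus("reading " + split);
+     reporter.incrCounter(MyCounters.RECORDS, 1);
+     output.collect(value, new LongWritable(1));
+   }
+ }
+ -->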
+ <!-- start interface org.apache.hadoop.mapred.RunningJob -->
+ <interface name="RunningJob" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job identifier.
+
+ @return the job identifier.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the job.
+
+ @return the name of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the path of the submitted job configuration.
+
+ @return the path of the submitted job configuration.]]>
+ </doc>
+ </method>
+ <method name="getTrackingURL" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the URL where some job progress information will be displayed.
+
+ @return the URL where some job progress information will be displayed.]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's map-tasks, as a float between 0.0
+ and 1.0. When all map tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's map-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0
+ and 1.0. When all reduce tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's reduce-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isComplete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job is finished or not.
+ This is a non-blocking call.
+
+ @return <code>true</code> if the job is complete, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isSuccessful" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job completed successfully.
+
+ @return <code>true</code> if the job succeeded, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="waitForCompletion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Blocks until the job is complete.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill the running job. Blocks until all job tasks have been
+ killed as well. If the job is no longer running, it simply returns.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="startFrom" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get events indicating completion (success/failure) of component tasks.
+
+ @param startFrom index to start fetching events from
+ @return an array of {@link TaskCompletionEvent}s
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill indicated task attempt.
+
+ @param taskId the id of the task to be terminated.
+ @param shouldFail if true the task is failed and added to failed tasks
+ list, otherwise it is just killed, w/o affecting
+ job failure status.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the counters for this job.
+
+ @return the counters for this job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RunningJob</code> is the user-interface to query for details on a
+ running Map-Reduce job.
+
+ <p>Clients can get hold of <code>RunningJob</code> via the {@link JobClient}
+ and then query the running-job for details such as name, configuration,
+ progress etc.</p>
+
+ @see JobClient]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RunningJob -->
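+ <!-- Illustrative sketch (an assumption): the query loop the description above
+ alludes to, submitting a job through JobClient and polling the RunningJob
+ handle. The job configuration is elided; only the RunningJob calls documented
+ above are used.
+
+ import org.apache.hadoop.mapred.JobClient;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.RunningJob;
+
+ public class SubmitAndWatch {
+   public static void main(String[] args) throws Exception {
+     JobConf conf = new JobConf();
+     // ... input/output formats and paths would be configured here ...
+     RunningJob job = new JobClient(conf).submitJob(conf);
+     System.out.println("tracking: " + job.getTrackingURL());
+     while (!job.isComplete()) {       // non-blocking status check
+       System.out.printf("map %.0f%% reduce %.0f%%%n",
+           job.mapProgress() * 100, job.reduceProgress() * 100);
+       Thread.sleep(5000);
+     }
+     System.out.println(job.isSuccessful() ? "succeeded" : "failed");
+   }
+ }
+ -->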
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <class name="SequenceFileAsBinaryInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+    <![CDATA[An InputFormat that reads keys and values from SequenceFiles in
+ binary (raw) format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <class name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"/>
+ <constructor name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the key class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the value class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.BytesWritable"/>
+ <param name="val" type="org.apache.hadoop.io.BytesWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read raw bytes from a SequenceFile.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Read records from a SequenceFile as binary (raw) bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+ <class name="SequenceFileAsTextInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+    <![CDATA[This class is similar to SequenceFileInputFormat, except that it generates
+ SequenceFileAsTextRecordReader, which converts the input keys and values to their
+ String forms by calling their toString() methods.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <class name="SequenceFileAsTextRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="SequenceFileAsTextRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+    <![CDATA[Read the next key/value pair, converting each to its String form.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+    <![CDATA[This class converts the input keys and values to their String forms by
+ calling their toString() methods. This class is to SequenceFileAsTextInputFormat
+ what LineRecordReader is to TextInputFormat.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+ <class name="SequenceFileInputFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+    <![CDATA[Create a record reader for the given split.
+ @param split file split
+ @param job job configuration
+ @param reporter reporter that sends reports to the task tracker
+ @return RecordReader]]>
+ </doc>
+ </method>
+ <method name="setFilterClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="filterClass" type="java.lang.Class"/>
+ <doc>
+    <![CDATA[Set the filter class.
+
+ @param conf application configuration
+ @param filterClass filter class]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[A class that allows a map/reduce job to work on a sample of sequence files.
+ The sample is decided by the filter class set by the job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+ <!-- start interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <interface name="SequenceFileInputFilter.Filter" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+    <![CDATA[Filter function.
+ Decide whether a record should be accepted or filtered out.
+ @param key record key
+ @return true if the record is accepted; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[Filter interface.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <class name="SequenceFileInputFilter.FilterBase" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.SequenceFileInputFilter.Filter"/>
+ <constructor name="SequenceFileInputFilter.FilterBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+    <![CDATA[Base class for Filters.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
+ <class name="SequenceFileInputFilter.MD5Filter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.MD5Filter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+    <![CDATA[Set the filtering frequency in the configuration.
+
+ @param conf configuration
+ @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+    <![CDATA[Configure the filter according to the configuration.
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+    <![CDATA[Filtering method.
+ If MD5(key) % frequency == 0, return true; otherwise return false.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(org.apache.hadoop.io.Writable)]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+    <![CDATA[This class returns a set of records by examining the MD5 digest of each
+ key against a filtering frequency <i>f</i>. The filtering criterion is
+ MD5(key) % f == 0.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <class name="SequenceFileInputFilter.PercentFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.PercentFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+    <![CDATA[Set the frequency and store it in the conf.
+ @param conf configuration
+ @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+    <![CDATA[Configure the filter by checking the configuration.
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+    <![CDATA[Filtering method.
+ If record# % frequency == 0, return true; otherwise return false.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(org.apache.hadoop.io.Writable)]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[This class returns a percentage of records.
+ The percentage is determined by a filtering frequency <i>f</i> using
+ the criteria record# % f == 0.
+ For example, if the frequency is 10, one out of 10 records is returned.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
+ <class name="SequenceFileInputFilter.RegexFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.RegexFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPattern"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="regex" type="java.lang.String"/>
+ <exception name="PatternSyntaxException" type="java.util.regex.PatternSyntaxException"/>
+ <doc>
+    <![CDATA[Define the filtering regex and store it in the conf.
+ @param conf where the regex is set
+ @param regex regex used as a filter]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+    <![CDATA[Configure the filter by checking the configuration.]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+    <![CDATA[Filtering method.
+ If the key matches the regex, return true; otherwise return false.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(org.apache.hadoop.io.Writable)]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[A record filter that matches keys against a regex.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
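+ <!-- Illustrative sketch (an assumption): sampling a SequenceFile input with
+ the filter family above, here keeping the records whose ordinal satisfies
+ record# % 100 == 0 via PercentFilter. setFilterClass and setFrequency are the
+ documented static helpers; the class name SampledInputSetup is hypothetical.
+
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.SequenceFileInputFilter;
+
+ public class SampledInputSetup {
+   public static void configure(JobConf job) {
+     job.setInputFormat(SequenceFileInputFilter.class);
+     SequenceFileInputFilter.setFilterClass(job,
+         SequenceFileInputFilter.PercentFilter.class);
+     // Roughly one record in a hundred passes the filter.
+     SequenceFileInputFilter.PercentFilter.setFrequency(job, 100);
+   }
+ }
+ -->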
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="listPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
+ <class name="SequenceFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.SequenceFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf}
+ @return the {@link CompressionType} for the output {@link SequenceFile},
+ defaulting to {@link CompressionType#RECORD}]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf} to modify
+ @param style the {@link CompressionType} for the output
+ {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+ <class name="SequenceFileRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <constructor name="SequenceFileRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of key that must be passed to {@link
+ #next(WritableComparable,Writable)}.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of value that must be passed to {@link
+ #next(WritableComparable,Writable)}.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="conf" type="org.apache.hadoop.conf.Configuration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link RecordReader} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer -->
+ <class name="StatusHttpServer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer" type="java.lang.String, java.lang.String, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a status server on the given port.
+ The jsp scripts are taken from src/webapps/<name>.
+ @param name The name of the server
+ @param port The port to use on the server
+ @param findPort whether the server should start at the given port and
+ increment by 1 until it finds a free port.]]>
+ </doc>
+ </constructor>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Set a value in the webapp context. These values are available to the jsp
+ pages as "application.getAttribute(name)".
+ @param name The name of the attribute
+ @param value The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="addServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="servletClass" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[Add a servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param servletClass The servlet class]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value in the webapp context.
+ @param name The name of the attribute
+ @return The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the port that the server is on
+ @return the port]]>
+ </doc>
+ </method>
+ <method name="setThreads"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="min" type="int"/>
+ <param name="max" type="int"/>
+ </method>
+ <method name="addSslListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="keystore" type="java.lang.String"/>
+ <param name="storPass" type="java.lang.String"/>
+ <param name="keyPass" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Configure an SSL listener on the server.
+ @param addr address to listen on
+ @param keystore location of the keystore
+ @param storPass password for the keystore
+ @param keyPass password for the key]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start the server. Does not wait for the server to start.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Stop the server.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Create an embedded Jetty server to answer http requests. The primary goal
+ is to serve up status information for the server.
+ There are three contexts:
+ "/logs/" -> points to the log directory
+ "/static/" -> points to common static files (src/webapps/static)
+ "/" -> the jsp server code from (src/webapps/<name>)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer -->
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer.StackServlet -->
+ <class name="StatusHttpServer.StackServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer.StackServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A very simple servlet to serve up a text representation of the current
+ stack traces. It both returns the stacks to the caller and logs them.
+ Currently the stack traces are captured sequentially, so they do not
+ represent a single consistent snapshot.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer.StackServlet -->
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <class name="StatusHttpServer.TaskGraphServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer.TaskGraphServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="width" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[width of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="height" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[height of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="ymargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on y axis]]>
+ </doc>
+ </field>
+ <field name="xmargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on x axis]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The servlet that outputs SVG graphics for map/reduce task
+ statuses]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent -->
+ <class name="TaskCompletionEvent" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskCompletionEvent"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor for Writable.]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskCompletionEvent" type="int, java.lang.String, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor. eventId should be created externally and incremented
+ per event for each job.
+ @param eventId event id; event ids should be unique and assigned
+ incrementally, starting from 0.
+ @param taskId task id
+ @param status task's status
+ @param taskTrackerHttp task tracker's host:port for http.]]>
+ </doc>
+ </constructor>
+ <method name="getEventId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns event Id.
+ @return event id]]>
+ </doc>
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id]]>
+ </doc>
+ </method>
+ <method name="getTaskStatus" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns enum Status.SUCCEEDED or Status.FAILED.
+ @return task status]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerHttp" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the http location of the tasktracker where this task ran.
+ @return http location of tasktracker user logs]]>
+ </doc>
+ </method>
+ <method name="getTaskRunTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns time (in millisec) the task took to complete.]]>
+ </doc>
+ </method>
+ <method name="setTaskRunTime"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskCompletionTime" type="int"/>
+ <doc>
+ <![CDATA[Set the task completion time
+ @param taskCompletionTime time (in millisec) the task took to complete]]>
+ </doc>
+ </method>
+ <method name="setEventId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="eventId" type="int"/>
+ <doc>
+ <![CDATA[Set event id. Event ids should be assigned incrementally, starting from 0.
+ @param eventId]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId]]>
+ </doc>
+ </method>
+ <method name="setTaskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"/>
+ <doc>
+ <![CDATA[Set task status.
+ @param status]]>
+ </doc>
+ </method>
+ <method name="setTaskTrackerHttp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTrackerHttp" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set task tracker http location.
+ @param taskTrackerHttp]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isMapTask" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="idWithinJob" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="EMPTY_ARRAY" type="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is used to track task completion events on
+ the job tracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent -->
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <class name="TaskCompletionEvent.Status" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskCompletionEvent.Status&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <!-- start class org.apache.hadoop.mapred.TaskLog -->
+ <class name="TaskLog" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLog"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskLogFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logsRetainHours" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Purge old user logs.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskLogLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the desired maximum length of a task's logs.
+ @param conf the job to look in
+ @return the number of bytes to cap the log files at]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ If the tailLength is 0, the entire output will be saved.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="setup" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ Setup commands such as setting memory limit can be passed which
+ will be executed before exec.
+ If the tailLength is 0, the entire output will be saved.
+ @param setup The setup commands for the exec'ed process.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="addCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="isExecutable" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add quotes to each of the command strings and
+ return as a single string
+ @param cmd The command to be quoted
+ @param isExecutable if true, the first argument is treated as an
+ executable and converted to a shell path
+ @return the quoted string.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="captureDebugOut" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="debugoutFilename" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture debug script's
+ stdout and stderr to debugout.
+ @param cmd The command and the arguments that should be run
+ @param debugoutFilename The filename that stdout and stderr
+ should be saved to.
+ @return the modified command that should be run
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple logger to handle the task-specific user logs.
+ This class uses the system property <code>hadoop.log.dir</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog -->
+ <!-- start class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <class name="TaskLog.LogName" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskLog.LogName&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskLog.LogName[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskLog.LogName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The filter for userlogs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <!-- start class org.apache.hadoop.mapred.TaskLog.Reader -->
+ <class name="TaskLog.Reader" extends="java.io.InputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLog.Reader" type="java.lang.String, org.apache.hadoop.mapred.TaskLog.LogName, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a log file from start to end positions. The offsets may be negative,
+ in which case they are relative to the end of the file. For example,
+ Reader(taskid, kind, 0, -1) is the entire file and
+ Reader(taskid, kind, -4197, -1) is the last 4196 bytes.
+ @param taskid the id of the task to read the log file for
+ @param kind the kind of log to read
+ @param start the offset to read from (negative is relative to tail)
+ @param end the offset to read up to (negative is relative to tail)
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog.Reader -->
+ <!-- start class org.apache.hadoop.mapred.TaskLogAppender -->
+ <class name="TaskLogAppender" extends="org.apache.log4j.FileAppender"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogAppender"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="activateOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Getter/Setter methods for log4j.]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ </method>
+ <method name="getTotalLogFileSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setTotalLogFileSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logSize" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[A simple log4j-appender for the task child's
+ map-reduce system logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogAppender -->
+ <!-- start class org.apache.hadoop.mapred.TaskLogServlet -->
+ <class name="TaskLogServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the logs via http.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A servlet that is run by the TaskTrackers to provide the task logs via http.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskReport -->
+ <class name="TaskReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The id of the task.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The amount completed, between zero and one.]]>
+ </doc>
+ </method>
+ <method name="getState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The most recent state, reported by a {@link Reporter}.]]>
+ </doc>
+ </method>
+ <method name="getDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A list of error messages.]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A table of counters.]]>
+ </doc>
+ </method>
+ <method name="getFinishTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get finish time of task.
+ @return 0 if finish time was not set, else the finish time.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get start time of task.
+ @return 0 if start time was not set, else start time.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A report on the state of a task.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskReport -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker -->
+ <class name="TaskTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.TaskUmbilicalProtocol"/>
+ <implements name="java.lang.Runnable"/>
+ <constructor name="TaskTracker" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start with the local machine name and the default JobTracker.]]>
+ </doc>
+ </constructor>
+ <method name="getTaskTrackerMetrics" return="org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cleanupStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Removes all contents of temporary storage. Called upon
+ startup, to remove any leftovers from a previous run.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close down the TaskTracker and all its components. We must also shutdown
+ any running tasks or threads, and cleanup disk space. A new TaskTracker
+ within the same process space might be restarted, so everything must be
+ clean.]]>
+ </doc>
+ </method>
+ <method name="getJobClient" return="org.apache.hadoop.mapred.InterTrackerProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The connection to the JobTracker, used by the TaskRunner
+ for locating remote files.]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerReportAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the address to which the tasktracker is bound.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The server retry loop.
+ This while-loop attempts to connect to the JobTracker. It only
+ loops when the old TaskTracker has gone bad (its state is
+ stale somehow) and we need to reinitialize everything.]]>
+ </doc>
+ </method>
+ <method name="getTask" return="org.apache.hadoop.mapred.Task"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called upon startup by the child process, to fetch Task data.]]>
+ </doc>
+ </method>
+ <method name="statusUpdate" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="taskStatus" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called periodically to report Task progress, from 0.0 to 1.0.]]>
+ </doc>
+ </method>
+ <method name="reportDiagnosticInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="info" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when the task dies before completion, and we want to report back
+ diagnostic info]]>
+ </doc>
+ </method>
+ <method name="ping" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Child checking to see if we're alive. Normally does nothing.]]>
+ </doc>
+ </method>
+ <method name="done"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="shouldPromote" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The task is done.]]>
+ </doc>
+ </method>
+ <method name="shuffleError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A reduce-task failed to shuffle the map-outputs. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="fsError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A child task had a local filesystem error. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="getMapCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxLocs" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mapOutputLost"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="errorMsg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A completed map task's output has been lost.]]>
+ </doc>
+ </method>
+ <method name="isIdle" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this task tracker idle?
+ @return has this task tracker finished and cleaned up all of its tasks?]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Start the TaskTracker, pointing toward the indicated JobTracker.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[TaskTracker is a process that starts and tracks MR Tasks
+ in a networked environment. It contacts the JobTracker
+ for Task assignments and to report results.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.Child -->
+ <class name="TaskTracker.Child" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskTracker.Child"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ </method>
+ <doc>
+ <![CDATA[The main() for child processes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.Child -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <class name="TaskTracker.MapOutputServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskTracker.MapOutputServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in TaskTracker's Jetty to serve the map outputs
+ to other nodes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics -->
+ <class name="TaskTracker.TaskTrackerMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics -->
+ <!-- start class org.apache.hadoop.mapred.TextInputFormat -->
+ <class name="TextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="TextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return is used to signal end of line. Keys are
+ the position in the file, and values are the line of text.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat -->
+ <class name="TextOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes plain text files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+ <class name="TextOutputFormat.LineRecordWriter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"/>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+ <doc>
+ <![CDATA[<p>A software framework for easily writing applications which process vast
+amounts of data (multi-terabyte data-sets) in parallel on large clusters
+(thousands of nodes) built of commodity hardware in a reliable, fault-tolerant
+manner.</p>
+
+<p>A Map-Reduce <i>job</i> usually splits the input data-set into independent
+chunks which are processed by <i>map</i> tasks in a completely parallel manner,
+followed by <i>reduce</i> tasks which aggregate their output. Typically both
+the input and the output of the job are stored in a
+{@link org.apache.hadoop.fs.FileSystem}. The framework takes care of monitoring
+tasks and re-executing failed ones. Since the compute nodes and the storage
+nodes are usually the same, i.e. Hadoop's Map-Reduce framework and Distributed
+FileSystem run on the same set of nodes, tasks are effectively scheduled
+on the nodes where data is already present, resulting in very high aggregate
+bandwidth across the cluster.</p>
+
+<p>The Map-Reduce framework operates exclusively on <tt>&lt;key, value&gt;</tt>
+pairs, i.e. the input to the job is viewed as a set of <tt>&lt;key, value&gt;</tt>
+pairs and the output as another, possibly different, set of
+<tt>&lt;key, value&gt;</tt> pairs. The <tt>key</tt>s and <tt>value</tt>s have to
+be serializable as {@link org.apache.hadoop.io.Writable}s and additionally the
+<tt>key</tt>s have to be {@link org.apache.hadoop.io.WritableComparable}s in
+order to facilitate grouping by the framework.</p>
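+
+<p>As an illustrative sketch, a custom key type satisfying these requirements
+might look like the following (<tt>TextPair</tt> is a hypothetical example,
+not part of the Hadoop API):</p>
+<pre><tt>
+public class TextPair implements WritableComparable {
+
+  private Text first = new Text();
+  private Text second = new Text();
+
+  // serialize the fields in a fixed order
+  public void write(DataOutput out) throws IOException {
+    first.write(out);
+    second.write(out);
+  }
+
+  // deserialize the fields in the same order
+  public void readFields(DataInput in) throws IOException {
+    first.readFields(in);
+    second.readFields(in);
+  }
+
+  // order by first, breaking ties on second
+  public int compareTo(Object o) {
+    TextPair that = (TextPair) o;
+    int cmp = first.compareTo(that.first);
+    return (cmp != 0) ? cmp : second.compareTo(that.second);
+  }
+}
+</tt></pre>
+
+<p>A real key type should also override <tt>hashCode()</tt>, since the default
+partitioning of intermediate outputs is based on the key's hash.</p>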
+
+<p>Data flow:</p>
+<pre>
+ (input)
+ <tt>&lt;k1, v1&gt;</tt>
+
+ |
+ V
+
+ <b>map</b>
+
+ |
+ V
+
+ <tt>&lt;k2, v2&gt;</tt>
+
+ |
+ V
+
+ <b>combine</b>
+
+ |
+ V
+
+ <tt>&lt;k2, v2&gt;</tt>
+
+ |
+ V
+
+ <b>reduce</b>
+
+ |
+ V
+
+ <tt>&lt;k3, v3&gt;</tt>
+ (output)
+</pre>
+
+<p>Applications typically implement
+{@link org.apache.hadoop.mapred.Mapper#map(Object, Object, OutputCollector, Reporter)}
+and
+{@link org.apache.hadoop.mapred.Reducer#reduce(Object, Iterator, OutputCollector, Reporter)}
+methods. The application-writer also specifies various facets of the job such
+as input and output locations, the <tt>Partitioner</tt>, <tt>InputFormat</tt>
+&amp; <tt>OutputFormat</tt> implementations to be used etc. as
+a {@link org.apache.hadoop.mapred.JobConf}. The client program,
+{@link org.apache.hadoop.mapred.JobClient}, then submits the job to the framework
+and optionally monitors it.</p>
+
+<p>The framework spawns one map task per
+{@link org.apache.hadoop.mapred.InputSplit} generated by the
+{@link org.apache.hadoop.mapred.InputFormat} of the job and calls
+{@link org.apache.hadoop.mapred.Mapper#map(Object, Object, OutputCollector, Reporter)}
+with each &lt;key, value&gt; pair read by the
+{@link org.apache.hadoop.mapred.RecordReader} from the <tt>InputSplit</tt> for
+the task. The intermediate outputs of the maps are then grouped by <tt>key</tt>s
+and optionally aggregated by the <i>combiner</i>. The key space of intermediate
+outputs is partitioned by the {@link org.apache.hadoop.mapred.Partitioner}, where
+the number of partitions is exactly the number of reduce tasks for the job.</p>
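+
+<p>As a sketch of the interface, a hash-based partitioner in the spirit of the
+default <tt>HashPartitioner</tt> might look like this (the class name is
+hypothetical):</p>
+<pre><tt>
+public class ModuloPartitioner&lt;K, V&gt; implements Partitioner&lt;K, V&gt; {
+
+  public void configure(JobConf job) {}   // no configuration needed
+
+  public int getPartition(K key, V value, int numPartitions) {
+    // mask the sign bit so the partition index is never negative
+    return (key.hashCode() &amp; Integer.MAX_VALUE) % numPartitions;
+  }
+}
+</tt></pre>
+
+<p>Such a class is set on the job via <tt>JobConf.setPartitionerClass</tt>.</p>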
+
+<p>The reduce tasks fetch the sorted intermediate outputs of the maps, via http,
+merge the &lt;key, value&gt; pairs and call
+{@link org.apache.hadoop.mapred.Reducer#reduce(Object, Iterator, OutputCollector, Reporter)}
+for each &lt;key, list of values&gt; pair. The output of the reduce tasks is
+stored on the <tt>FileSystem</tt> by the
+{@link org.apache.hadoop.mapred.RecordWriter} provided by the
+{@link org.apache.hadoop.mapred.OutputFormat} of the job.</p>
+
+<p>Map-Reduce application to perform a distributed <i>grep</i>:</p>
+<pre><tt>
+public class Grep extends Configured implements Tool {
+
+ // <i>map: Search for the pattern specified by 'grep.mapper.regex' &amp;</i>
+ // <i>'grep.mapper.regex.group'</i>
+
+ static class GrepMapper&lt;K&gt;
+ extends MapReduceBase implements Mapper&lt;K, Text, Text, LongWritable&gt; {
+
+ private Pattern pattern;
+ private int group;
+
+ public void configure(JobConf job) {
+ pattern = Pattern.compile(job.get("grep.mapper.regex"));
+ group = job.getInt("grep.mapper.regex.group", 0);
+ }
+
+ public void map(K key, Text value,
+ OutputCollector&lt;Text, LongWritable&gt; output,
+ Reporter reporter)
+ throws IOException {
+ String text = value.toString();
+ Matcher matcher = pattern.matcher(text);
+ while (matcher.find()) {
+ output.collect(new Text(matcher.group(group)), new LongWritable(1));
+ }
+ }
+ }
+
+ // <i>reduce: Count the number of occurrences of the pattern</i>
+
+ static class GrepReducer&lt;K&gt; extends MapReduceBase
+ implements Reducer&lt;K, LongWritable, K, LongWritable&gt; {
+
+ public void reduce(K key, Iterator&lt;LongWritable&gt; values,
+ OutputCollector&lt;K, LongWritable&gt; output,
+ Reporter reporter)
+ throws IOException {
+
+ // sum all values for this key
+ long sum = 0;
+ while (values.hasNext()) {
+ sum += values.next().get();
+ }
+
+ // output sum
+ output.collect(key, new LongWritable(sum));
+ }
+ }
+
+ public int run(String[] args) throws Exception {
+ if (args.length &lt; 3) {
+ System.out.println("Grep &lt;inDir&gt; &lt;outDir&gt; &lt;regex&gt; [&lt;group&gt;]");
+ ToolRunner.printGenericCommandUsage(System.out);
+ return -1;
+ }
+
+ JobConf grepJob = new JobConf(getConf(), Grep.class);
+
+ grepJob.setJobName("grep");
+
+ grepJob.setInputPath(new Path(args[0]));
+ grepJob.setOutputPath(new Path(args[1]));
+
+ grepJob.setMapperClass(GrepMapper.class);
+ grepJob.setCombinerClass(GrepReducer.class);
+ grepJob.setReducerClass(GrepReducer.class);
+
+ grepJob.set("mapred.mapper.regex", args[2]);
+ if (args.length == 4)
+ grepJob.set("mapred.mapper.regex.group", args[3]);
+
+ grepJob.setOutputFormat(SequenceFileOutputFormat.class);
+ grepJob.setOutputKeyClass(Text.class);
+ grepJob.setOutputValueClass(LongWritable.class);
+
+ JobClient.runJob(grepJob);
+
+ return 0;
+ }
+
+ public static void main(String[] args) throws Exception {
+ int res = ToolRunner.run(new Configuration(), new Grep(), args);
+ System.exit(res);
+ }
+
+}
+</tt></pre>
+
+<p>Notice how the data-flow of the above grep job is very similar to doing the
+same via the unix pipeline:</p>
+
+<pre>
+cat input/* | grep &lt;regex&gt; | sort | uniq -c &gt; out
+</pre>
+
+<pre>
+ input | map | shuffle | reduce &gt; out
+</pre>
+
+<p>Hadoop Map-Reduce applications need not be written in
+Java<small><sup>TM</sup></small> only.
+<a href="../streaming/package-summary.html">Hadoop Streaming</a> is a utility
+which allows users to create and run jobs with any executables (e.g. shell
+utilities) as the mapper and/or the reducer.
+<a href="pipes/package-summary.html">Hadoop Pipes</a> is a
+<a href="http://www.swig.org/">SWIG</a>-compatible <em>C++ API</em> to implement
+Map-Reduce applications (non JNI<small><sup>TM</sup></small> based).</p>
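+
+<p>For example, a streaming job might be launched as follows; the streaming jar
+name below is a placeholder for wherever it lives in your installation:</p>
+
+<pre>
+hadoop jar hadoop-streaming.jar \
+    -input in -output out \
+    -mapper /bin/cat -reducer /usr/bin/wc
+</pre>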
+
+<p>See <a href="http://labs.google.com/papers/mapreduce.html">Google's original
+Map/Reduce paper</a> for background information.</p>
+
+<p><i>Java and JNI are trademarks or registered trademarks of
+Sun Microsystems, Inc. in the United States and other countries.</i></p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.mapred.jobcontrol">
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.Job -->
+ <class name="Job" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf, java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+ @param jobConf a mapred job configuration representing a job to be executed.
+ @param dependingJobs an array of jobs the current job depends on]]>
+ </doc>
+ </constructor>
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+
+ @param jobConf mapred job configuration representing a job to be executed.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job name of this job]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job name for this job.
+ @param jobName the job name]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job ID of this job]]>
+ </doc>
+ </method>
+ <method name="setJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job ID for this job.
+ @param id the job ID]]>
+ </doc>
+ </method>
+ <method name="getMapredJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred ID of this job]]>
+ </doc>
+ </method>
+ <method name="setMapredJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mapredJobID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job.
+ @param mapredJobID the mapred job ID for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred job conf of this job]]>
+ </doc>
+ </method>
+ <method name="setJobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Set the mapred job conf for this job.
+ @param jobConf the mapred job conf for this job.]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the state of this job]]>
+ </doc>
+ </method>
+ <method name="setState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Set the state for this job.
+ @param state the new state for this job.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the message of this job]]>
+ </doc>
+ </method>
+ <method name="setMessage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="message" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the message for this job.
+ @param message the message for this job.]]>
+ </doc>
+ </method>
+ <method name="getDependingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the depending jobs of this job]]>
+ </doc>
+ </method>
+ <method name="addDependingJob" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dependingJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+  <![CDATA[Add a job to this job's dependency list. Dependent jobs can only be
+  added while a Job is waiting to run, not during or afterwards.
+
+ @param dependingJob Job that this Job depends on.
+ @return <tt>true</tt> if the Job was added.]]>
+ </doc>
+ </method>
+ <method name="isCompleted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in a complete state]]>
+ </doc>
+ </method>
+ <method name="isReady" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in READY state]]>
+ </doc>
+ </method>
+ <method name="submit"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Submit this job to mapred. The state becomes RUNNING if submission
+ is successful, FAILED otherwise.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+  <![CDATA[@param args the command-line arguments]]>
+ </doc>
+ </method>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WAITING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEPENDENT_FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class encapsulates a MapReduce job and its dependency. It monitors
+ the states of the depending jobs and updates the state of this job.
+  A job starts in the WAITING state. If it does not have any depending jobs, or
+  all of its depending jobs are in the SUCCESS state, then the job state becomes
+  READY. If any depending job fails, the job will fail too.
+  When in the READY state, the job can be submitted to Hadoop for execution,
+  moving it to the RUNNING state. From RUNNING, the job ends in either the
+  SUCCESS or the FAILED state, depending on the status of the job execution.
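+
+  A minimal sketch of chaining two dependent jobs; <tt>FirstStep</tt> and
+  <tt>SecondStep</tt> are hypothetical job classes, and the Job constructor
+  may throw IOException:
+  <pre><tt>
+  JobConf confA = new JobConf(FirstStep.class);    // hypothetical job class
+  JobConf confB = new JobConf(SecondStep.class);   // hypothetical job class
+  Job jobA = new Job(confA);
+  Job jobB = new Job(confB);
+  jobB.addDependingJob(jobA);  // jobB stays WAITING until jobA reaches SUCCESS
+  </tt></pre>]]>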
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.Job -->
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.JobControl -->
+ <class name="JobControl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobControl" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a job control for a group of jobs.
+ @param groupName a name identifying this group]]>
+ </doc>
+ </constructor>
+ <method name="getWaitingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the waiting state]]>
+ </doc>
+ </method>
+ <method name="getRunningJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the running state]]>
+ </doc>
+ </method>
+ <method name="getReadyJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the ready state]]>
+ </doc>
+ </method>
+ <method name="getSuccessfulJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the success state]]>
+ </doc>
+ </method>
+ <method name="getFailedJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="addJob" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="aJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a new job.
+ @param aJob the new job]]>
+ </doc>
+ </method>
+ <method name="addJobs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobs" type="java.util.Collection&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"/>
+ <doc>
+  <![CDATA[Add a collection of jobs.
+
+  @param jobs the jobs to add]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the thread state]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+  <![CDATA[Set the thread state to STOPPING so that the
+  thread will stop when it wakes up.]]>
+ </doc>
+ </method>
+ <method name="suspend"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+  <![CDATA[Suspend the running thread.]]>
+ </doc>
+ </method>
+ <method name="resume"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+  <![CDATA[Resume the suspended thread.]]>
+ </doc>
+ </method>
+ <method name="allFinished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The main loop for the thread.
+ The loop does the following:
+ Check the states of the running jobs
+ Update the states of waiting jobs
+ Submit the jobs in ready state]]>
+ </doc>
+ </method>
+ <doc>
+  <![CDATA[This class encapsulates a set of MapReduce jobs and their dependencies.
+  It tracks the states of the jobs by placing them into different tables according
+  to their states.
+
+  This class provides APIs for the client application to add a job to the group
+  and to get the jobs in the group in different states. When a job is added, an
+  ID unique to the group is assigned to it.
+
+  This class has a thread that submits jobs when they become ready, monitors the
+  states of the running jobs, and updates the states of jobs based on the state
+  changes of their depending jobs. The class provides APIs for suspending,
+  resuming, and stopping the thread.
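+
+  A minimal usage sketch; <tt>jobA</tt> and <tt>jobB</tt> are assumed to be
+  already-constructed Job instances, and InterruptedException handling is
+  elided:
+  <pre><tt>
+  JobControl control = new JobControl("my-group");  // hypothetical group name
+  control.addJob(jobA);
+  control.addJob(jobB);
+  new Thread(control).start();   // JobControl implements Runnable
+  while (!control.allFinished()) {
+    Thread.sleep(5000);          // poll until every job succeeds or fails
+  }
+  control.stop();                // let the managing thread exit
+  </tt></pre>]]>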
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.JobControl -->
+ <doc>
+ <![CDATA[<p>Utilities for managing dependent jobs.</p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.mapred.join">
+ <!-- start class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+ <class name="ArrayListBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="ArrayListBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayListBackedIterator" type="java.util.ArrayList&lt;X&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. The
+ implementation uses an {@link java.util.ArrayList} to store elements
+ added to it, replaying them as requested.
+ Prefer {@link StreamBackedIterator}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <interface name="ComposableInputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Refinement of InputFormat requiring implementors to provide
+ ComposableRecordReader instead of RecordReader.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <interface name="ComposableRecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <implements name="java.lang.Comparable&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key this RecordReader would supply on a call to next(K,V)]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RecordReader into the object provided.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the stream is not empty, but provides no guarantee that
+ a call to next(K,V) will succeed.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[While key-value pairs from this RecordReader match the given key, register
+ them with the JoinCollector provided.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Additional operations required of a RecordReader to participate in a join.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputFormat -->
+ <class name="CompositeInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="CompositeInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Interpret a given string as a composite expression.
+ {@code
+ func ::= <ident>([<func>,]*<func>)
+ func ::= tbl(<class>,"<path>")
+ class ::= @see java.lang.Class#forName(java.lang.String)
+ path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
+ }
+ Reads expression from the <tt>mapred.join.expr</tt> property and
+ user-supplied join types from <tt>mapred.join.define.&lt;ident&gt;</tt>
+ types. Paths supplied to <tt>tbl</tt> are given as input paths to the
+ InputFormat class listed.
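+
+  For example, assuming the default <tt>inner</tt> join identifier registered
+  by {@link #addDefaults}, two sorted, identically partitioned inputs might be
+  joined as follows (the paths are placeholders):
+  <pre><tt>
+  JobConf job = new JobConf();
+  job.setInputFormat(CompositeInputFormat.class);
+  job.set("mapred.join.expr", CompositeInputFormat.compose(
+      "inner", SequenceFileInputFormat.class,
+      new Path("pathA"), new Path("pathB")));
+  </tt></pre>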
+ @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]>
+ </doc>
+ </method>
+ <method name="addDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds the default set of identifiers to the parser.]]>
+ </doc>
+ </method>
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify that this composite has children and that all its children
+ can validate their input.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a CompositeInputSplit from the child InputFormats by assigning the
+ ith split from each child to the ith composite split.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a CompositeRecordReader for the children of this InputFormat
+ as defined in the init expression.
+ The outermost join need only be composable, not necessarily a composite.
+ Mandating TupleWritable isn't strictly correct.]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+  Given an InputFormat class (inf) and a path (p), return:
+ {@code tbl(<inf>, <p>) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+  Given an operation (op), an InputFormat class (inf), and a set of paths (p), return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+  Given an operation (op), an InputFormat class (inf), and a set of paths (p), return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat capable of performing joins over a set of data sources sorted
+ and partitioned the same way.
+ @see #setFormat
+
+ A user may define new join types by setting the property
+ <tt>mapred.join.define.&lt;ident&gt;</tt> to a classname. In the expression
+ <tt>mapred.join.expr</tt>, the identifier will be assumed to be a
+ ComposableRecordReader.
+ <tt>mapred.join.keycomparator</tt> can be a classname used to compare keys
+ in the join.
+ @see JoinRecordReader
+ @see MultiFilterRecordReader]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputSplit -->
+ <class name="CompositeInputSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="CompositeInputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CompositeInputSplit" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.mapred.InputSplit"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an InputSplit to this collection.
+ @throws IOException If capacity was not specified during construction
+ or if capacity has been reached.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the aggregate length of all child InputSplits currently added.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the length of ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Collect a set of hosts from all child InputSplits.]]>
+ </doc>
+ </method>
+ <method name="getLocation" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+  <![CDATA[Get the locations of the ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write splits in the following format.
+ {@code
+ <count><class1><class2>...<classn><split1><split2>...<splitn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}
+ @throws IOException If the child InputSplit cannot be read, typically
+  for failing access checks.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This InputSplit contains a set of child InputSplits. Any InputSplit inserted
+ into this collection must have a public default constructor.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputSplit -->
+ <!-- start class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <class name="CompositeRecordReader" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="CompositeRecordReader" type="int, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a RecordReader with <tt>capacity</tt> children to position
+ <tt>id</tt> in the parent reader.
+ The id of a root CompositeRecordReader is -1 by convention, but relying
+ on this is not recommended.]]>
+ </doc>
+ </constructor>
+ <method name="combine" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ </method>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordReaderQueue" return="java.util.PriorityQueue&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return sorted list of RecordReaders for this composite.]]>
+ </doc>
+ </method>
+ <method name="getComparator" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return comparator defining the ordering for RecordReaders in this
+ composite.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rr" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ? extends V&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a RecordReader to this collection.
+ The id() of a RecordReader determines where in the Tuple its
+ entry will appear. Adding RecordReaders with the same id has
+ undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key for the current join or the value at the top of the
+ RecordReader heap.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the top of this RR into the given object.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if it is possible that this could emit more values.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Pass skip key to child RRs.]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Obtain an iterator over the child RRs apropos of the value type
+ ultimately emitted from this join.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+  <![CDATA[If the key provided matches that of this Composite, give the
+  JoinCollector an iterator over the values it may emit.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For all child RRs offering the key provided, obtain an iterator
+ at that position in the JoinCollector.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key of join or head of heap
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new key value common to all child RRs.
+ @throws ClassCastException if key classes differ.]]>
+ </doc>
+ </method>
+ <method name="createInternalValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a value to be used internally for joins.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unsupported (returns zero in all cases).]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all child RRs.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report progress as the minimum of all child RR progress.]]>
+ </doc>
+ </method>
+ <field name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, X&gt;.JoinCollector"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="kids" type="org.apache.hadoop.mapred.join.ComposableRecordReader[]"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A RecordReader that can effect joins of RecordReaders sharing a common key
+ type and partitioning.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <class name="InnerJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Return true iff the tuple is full (all data sources contain this key).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full inner join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <class name="JoinRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, org.apache.hadoop.io.Writable, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Emit the next set of key, value pairs as defined by the child
+ RecordReaders and operation associated with this composite RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator wrapping the JoinCollector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite joins returning Tuples of arbitrary Writables.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <class name="JoinRecordReader.JoinDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader.JoinDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Since the JoinCollector is effecting our operation, we need only
+ provide an iterator proxy wrapping its operation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+ <class name="MultiFilterRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"/>
+ <constructor name="MultiFilterRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For each tuple emitted, return a value (typically one of the values
+ in the tuple).
+ Modifying the Writables in the tuple is permitted and unlikely to affect
+ join behavior in most cases, but it is not recommended. It's safer to
+ clone first.]]>
+ </doc>
+ </method>
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Default implementation offers {@link #emit} every Tuple from the
+ collector (the outer join of child RRs).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator returning a single value from the tuple.
+ @see MultiFilterDelegationIterator]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite join returning values derived from multiple
+ sources, but generally not tuples.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <class name="MultiFilterRecordReader.MultiFilterDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"/>
+ <constructor name="MultiFilterRecordReader.MultiFilterDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy the JoinCollector, but include callback to emit.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <class name="OuterJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit everything from the collector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full outer join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.OverrideRecordReader -->
+ <class name="OverrideRecordReader" extends="org.apache.hadoop.mapred.join.MultiFilterRecordReader&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit the value with the highest position in the tuple.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instead of filling the JoinCollector with iterators from all
+ data sources, fill only the rightmost for this key.
+ This not only saves space by discarding the other sources, but
+ it also emits the number of key-value pairs in the preferred
+ RecordReader instead of repeating that stream n times, where
+ n is the cardinality of the cross product of the discarded
+ streams for the given key.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Prefer the &quot;rightmost&quot; data source for this key.
+ For example, <tt>override(S1,S2,S3)</tt> will prefer values
+ from S3 over S2, and values from S2 over S1 for all keys
+ emitted from all sources.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OverrideRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser -->
+ <class name="Parser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Very simple shift-reduce parser for join expressions.
+
+ This should be sufficient for the user extension permitted now, but ought to
+ be replaced with a parser generator if more complex grammars are supported.
+ In particular, this &quot;shift-reduce&quot; parser has no states. Each set
+ of formals requires a different internal node type, which is responsible for
+ interpreting the list of tokens it receives. This is sufficient for the
+ current grammar, but it has several annoying properties that might inhibit
+  extension. In particular, parentheses are always function calls; an
+  algebraic or filter grammar would not only require a node type, but must
+  also work around the internals of this parser.
+
+  For most other cases, adding classes to the hierarchy, particularly by
+  extending JoinRecordReader and MultiFilterRecordReader, is fairly
+  straightforward. One need only override the relevant method(s) (usually only
+  {@link CompositeRecordReader#combine}) and include a property to map its
+  value to an identifier in the parser.
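+
+  A sketch of a user-defined join type; the class name and its semantics are
+  hypothetical, the constructor mirrors the one declared by JoinRecordReader,
+  and TupleWritable.has(int) is assumed to report whether a position is set:
+  <pre><tt>
+  public class FirstSourceJoinReader extends JoinRecordReader&lt;Text&gt; {
+    public FirstSourceJoinReader(int id, JobConf conf, int capacity,
+        Class&lt;? extends WritableComparator&gt; cmpcl) throws IOException {
+      super(id, conf, capacity, cmpcl);
+    }
+    // emit the joined tuple whenever the first data source offers the key
+    protected boolean combine(Object[] srcs, TupleWritable dst) {
+      return dst.has(0);
+    }
+  }
+  </tt></pre>
+  It is then mapped to an identifier usable in <tt>mapred.join.expr</tt> via:
+  <pre><tt>
+  job.set("mapred.join.define.firstjoin", FirstSourceJoinReader.class.getName());
+  </tt></pre>]]>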
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Node -->
+ <class name="Parser.Node" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat"/>
+ <constructor name="Parser.Node" type="java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addIdentifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="ident" type="java.lang.String"/>
+ <param name="mcstrSig" type="java.lang.Class[]"/>
+ <param name="nodetype" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.Parser.Node&gt;"/>
+ <param name="cl" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;"/>
+ <exception name="NoSuchMethodException" type="java.lang.NoSuchMethodException"/>
+ <doc>
+ <![CDATA[For a given identifier, add a mapping to the nodetype for the parse
+ tree and to the ComposableRecordReader to be created, including the
+ formals required to invoke the constructor.
+ The nodetype and constructor signature should be filled in from the
+ child node.]]>
+ </doc>
+ </method>
+ <method name="setID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="int"/>
+ </method>
+ <method name="setKeyComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"/>
+ </method>
+ <field name="rrCstrMap" type="java.util.Map&lt;java.lang.String, java.lang.reflect.Constructor&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;&gt;"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ident" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Node -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <class name="Parser.NodeToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <class name="Parser.NumToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.NumToken" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <class name="Parser.StrToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.StrToken" type="org.apache.hadoop.mapred.join.Parser.TType, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Token -->
+ <class name="Parser.Token" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getType" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Tagged-union type for tokens from the join expression.
+ @see Parser.TType]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Token -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.TType -->
+ <class name="Parser.TType" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.join.Parser.TType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.join.Parser.TType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.TType -->
+ <!-- start interface org.apache.hadoop.mapred.join.ResetableIterator -->
+ <interface name="ResetableIterator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True iff a call to next will succeed.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign next value to actual.
+ It is required that elements added to a ResetableIterator be returned in
+ the same order after a call to {@link #reset} (FIFO).
+
+ Note that a call to this may fail for nested joins (i.e. more elements
+ available, but none satisfying the constraints of the join).
+ </doc>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign last value returned to actual.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set iterator to return to the start of its range. Must be called after
+ calling {@link #add} to avoid a ConcurrentModificationException.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an element to the collection of elements to iterate over.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close datasources and release resources. Calling methods on the iterator
+ after calling close has undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Close datasources, but do not release internal resources. Calling this
+ method should permit the object to be reused with a different datasource.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This defines an interface to a stateful Iterator that can replay elements
+ added to it directly.
+ Note that this does not extend {@link java.util.Iterator}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ResetableIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <class name="ResetableIterator.EMPTY" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;U&gt;"/>
+ <constructor name="ResetableIterator.EMPTY"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <!-- start class org.apache.hadoop.mapred.join.StreamBackedIterator -->
+ <class name="StreamBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="StreamBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. This
+ implementation uses a byte array to store elements added to it.
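+
+ A minimal usage sketch (assuming {@link org.apache.hadoop.io.Text} elements;
+ the protocol is FIFO: add, reset, iterate, and reset again to replay):
+ <pre>
+ StreamBackedIterator&lt;Text&gt; it = new StreamBackedIterator&lt;Text&gt;();
+ it.add(new Text("a"));
+ it.add(new Text("b"));
+ it.reset();                 // required after add()
+ Text val = new Text();
+ while (it.next(val)) {
+   // val holds "a", then "b"
+ }
+ it.reset();                 // rewind; the same elements replay in order
+ it.close();
+ </pre>]]>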
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.StreamBackedIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.TupleWritable -->
+ <class name="TupleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="TupleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty tuple with no allocated storage for writables.]]>
+ </doc>
+ </constructor>
+ <constructor name="TupleWritable" type="org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Initialize tuple with storage; it is unknown whether any of the
+ writables contain &quot;written&quot; values.]]>
+ </doc>
+ </constructor>
+ <method name="has" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Return true if tuple has an element at the position provided.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get ith Writable from Tuple.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of children in this Tuple.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator over the elements in this tuple.
+ Note that this doesn't flatten the tuple; one may receive tuples
+ from this iterator.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert Tuple to String as in the following.
+ <tt>[<child1>,<child2>,...,<childn>]</tt>]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes each Writable to <code>out</code>.
+ TupleWritable format:
+ {@code
+ <count><type1><type2>...<typen><obj1><obj2>...<objn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.
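+
+ A minimal sketch of inspecting a tuple delivered by the join framework, where
+ position <tt>i</tt> holds the value contributed by the ith source:
+ <pre>
+ void dump(TupleWritable tuple) {
+   for (int i = 0; i &lt; tuple.size(); ++i) {
+     if (tuple.has(i)) {                  // source i matched this key
+       System.out.println(i + " => " + tuple.get(i));
+     }
+   }
+ }
+ </pre>]]>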
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.TupleWritable -->
+ <!-- start class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+ <class name="WrappedRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, U&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key at the head of this RR.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RR into the object supplied.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if the RR, including the k,v pair stored in this object,
+ is not exhausted.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next k,v pair into the head of this object; return true iff
+ the RR and this are not exhausted.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an iterator to the collector at the position occupied by this
+ RecordReader over the values in this stream paired with the key
+ provided (i.e. register a stream of values from this source matching K
+ with a collector).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write key-value pair at the head of this stream to the objects provided;
+ get next key-value pair from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new key from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="U extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new value from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request progress from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request position from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Forward close request to proxied RR.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key at head of proxied RR
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true iff compareTo(other) returns 0.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy class for a RecordReader participating in the join framework.
+ This class keeps track of the &quot;head&quot; key-value pair for the
+ provided RecordReader and keeps a store of values matching a key when
+ this source is participating in a join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+ <doc>
+ <![CDATA[<p>Given a set of sorted datasets keyed with the same class and yielding equal
+partitions, it is possible to effect a join of those datasets prior to the map.
+This could save costs in re-partitioning, sorting, shuffling, and writing out
+data required in the general case.</p>
+
+<h3><a name="Interface"></a>Interface</h3>
+
+<p>The attached code offers the following interface to users of these
+classes.</p>
+
+<table>
+<tr><th>property</th><th>required</th><th>value</th></tr>
+<tr><td>mapred.join.expr</td><td>yes</td>
+ <td>Join expression to effect over input data</td></tr>
+<tr><td>mapred.join.keycomparator</td><td>no</td>
+ <td><tt>WritableComparator</tt> class to use for comparing keys</td></tr>
+<tr><td>mapred.join.define.&lt;ident&gt;</td><td>no</td>
+ <td>Class mapped to identifier in join expression</td></tr>
+</table>
+
+<p>The join expression understands the following grammar:</p>
+
+<pre>func ::= &lt;ident&gt;([&lt;func&gt;,]*&lt;func&gt;)
+func ::= tbl(&lt;class&gt;,"&lt;path&gt;")
+
+</pre>
+
+<p>Operations included in this patch are partitioned into one of two types:
+join operations emitting tuples and "multi-filter" operations emitting a
+single value from (but not necessarily included in) a set of input values.
+For a given key, each operation will consider the cross product of all
+values for all sources at that node.</p>
+
+<p>Identifiers supported by default:</p>
+
+<table>
+<tr><th>identifier</th><th>type</th><th>description</th></tr>
+<tr><td>inner</td><td>Join</td><td>Full inner join</td></tr>
+<tr><td>outer</td><td>Join</td><td>Full outer join</td></tr>
+<tr><td>override</td><td>MultiFilter</td>
+ <td>For a given key, prefer values from the rightmost source</td></tr>
+</table>
+
+<p>A user of this class must set the <tt>InputFormat</tt> for the job to
+<tt>CompositeInputFormat</tt> and define a join expression accepted by the
+preceding grammar. For example, both of the following are acceptable:</p>
+
+<pre>inner(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+ "hdfs://host:8020/foo/bar"),
+ tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+ "hdfs://host:8020/foo/baz"))
+
+outer(override(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+ "hdfs://host:8020/foo/bar"),
+ tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+ "hdfs://host:8020/foo/baz")),
+ tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+ "hdfs://host:8020/foo/rab"))
+</pre>
+
+<p><tt>CompositeInputFormat</tt> includes a handful of convenience methods to
+aid construction of these verbose statements.</p>
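+
+<p>For example, the first expression above can be built programmatically; a
+minimal sketch (the driver class <tt>MyJob</tt> is a placeholder):</p>
+
+<pre>JobConf job = new JobConf(MyJob.class);
+job.setInputFormat(CompositeInputFormat.class);
+job.set("mapred.join.expr", CompositeInputFormat.compose(
+    "inner", SequenceFileInputFormat.class,
+    "hdfs://host:8020/foo/bar", "hdfs://host:8020/foo/baz"));
+</pre>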
+
+<p>As in the second example, joins may be nested. Users may provide a
+comparator class in the <tt>mapred.join.keycomparator</tt> property to specify
+the ordering of their keys, or accept the default comparator as returned by
+<tt>WritableComparator.get(keyclass)</tt>.</p>
+
+<p>Users can specify their own join operations, typically by overriding
+<tt>JoinRecordReader</tt> or <tt>MultiFilterRecordReader</tt> and mapping that
+class to an identifier in the join expression using the
+<tt>mapred.join.define.<em>ident</em></tt> property, where <em>ident</em> is
+the identifier appearing in the join expression. Users may elect to emit or
+modify values passing through their join operation. Consulting the existing
+operations for guidance is recommended. Adding arguments is considerably more
+complex (and only partially supported), as one must also add a <tt>Node</tt>
+type to the parse tree. One is probably better off extending
+<tt>RecordReader</tt> in most cases.</p>
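+
+<p>A minimal sketch of wiring in a custom operation, where
+<tt>MyJoinReader</tt> is a hypothetical subclass of
+<tt>JoinRecordReader</tt>:</p>
+
+<pre>job.set("mapred.join.define.myjoin", MyJoinReader.class.getName());
+job.set("mapred.join.expr",
+    CompositeInputFormat.compose("myjoin", SequenceFileInputFormat.class,
+        "hdfs://host:8020/foo/bar", "hdfs://host:8020/foo/baz"));
+</pre>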
+
+<a href="http://issues.apache.org/jira/browse/HADOOP-2085">JIRA</a>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.mapred.lib">
+ <!-- start class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
+ <class name="FieldSelectionMapReduce" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="FieldSelectionMapReduce"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The identity function. Input key/value pair is written directly to output.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements a mapper/reducer class that can be used to perform
+ field selections in a manner similar to unix cut. The input data is treated
+ as fields separated by a user specified separator (the default value is
+ "\t"). The user can specify a list of fields that form the map output keys,
+ and a list of fields that form the map output values. If the inputformat is
+ TextInputFormat, the mapper will ignore the key to the map function, and the
+ fields are taken from the value only. Otherwise, the fields are the union of those
+ from the key and those from the value.
+
+ The field separator is under attribute "mapred.data.field.separator".
+
+ The map output field list spec is under attribute "map.output.key.value.fields.spec".
+ The value is expected to be like "keyFieldsSpec:valueFieldsSpec".
+ Both keyFieldsSpec and valueFieldsSpec are comma (,) separated lists of field specs: fieldSpec,fieldSpec,fieldSpec ...
+ Each field spec can be a simple number (e.g. 5) specifying a specific field, or a range
+ (like 2-5) to specify a range of fields, or an open range (like 3-) specifying all
+ the fields starting from field 3. An open range field spec applies to value fields only;
+ it has no effect on the key fields.
+
+ Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields 4,3,0 and 1 for keys,
+ and use fields 6,5,1,2,3,7 and above for values.
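+
+ A minimal configuration sketch for the example above (the comma separator is
+ an illustrative assumption):
+ <pre>
+ JobConf job = new JobConf(FieldSelectionMapReduce.class);
+ job.set("mapred.data.field.separator", ",");
+ job.set("map.output.key.value.fields.spec", "4,3,0,1:6,5,1-3,7-");
+ </pre>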
+
+ The reduce output field list spec is under attribute "reduce.output.key.value.fields.spec".
+
+ The reducer extracts output key/value pairs in a similar manner, except that
+ the key is never ignored.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
+ <!-- start class org.apache.hadoop.mapred.lib.HashPartitioner -->
+ <class name="HashPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="HashPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partition keys by their {@link Object#hashCode()}.
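+
+ A sketch of the partition computation (masking the sign bit, then taking the
+ value modulo the number of reduces):
+ <pre>
+ (key.hashCode() &amp; Integer.MAX_VALUE) % numReduceTasks
+ </pre>]]>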
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.HashPartitioner -->
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <class name="IdentityMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The identity function. Input key/value pair is written directly to
+ output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implements the identity function, mapping inputs directly to outputs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <class name="IdentityReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;V&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes all keys and values directly to output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Performs no reduction, writing all input values directly to the output.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <class name="InverseMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, V, K&gt;"/>
+ <constructor name="InverseMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;V, K&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The inverse function. Input keys and values are swapped.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that swaps keys and values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+ <class name="KeyFieldBasedPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="KeyFieldBasedPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+ <!-- start class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <class name="LongSumReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, org.apache.hadoop.io.LongWritable, K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="LongSumReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Reducer} that sums long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+ <class name="MultipleOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a composite record writer that can write key/value data to different
+ output files.
+
+ @param fs
+ the file system to use
+ @param job
+ the job conf for the job
+ @param name
+ the leaf file name for the output file (such as "part-00000")
+ @param arg3
+ a progressable for reporting progress.
+ @return a composite record writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="generateLeafFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the leaf name for the output file name. The default behavior does
+ not change the leaf file name (such as part-00000).
+
+ @param name
+ the leaf file name for the output file
+ @return the given leaf file name]]>
+ </doc>
+ </method>
+ <method name="generateFileNameForKeyValue" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the output file name based on the given key and the leaf file
+ name. The default behavior is that the file name does not depend on the
+ key.
+
+ @param key
+ the key of the output data
+ @param name
+ the leaf file name
+ @return generated file name]]>
+ </doc>
+ </method>
+ <method name="generateActualKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Generate the actual key from the given key/value. The default behavior is that
+ the actual key is equal to the given key.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual key derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="generateActualValue" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Generate the actual value from the given key and value. The default behavior is that
+ the actual value is equal to the given value.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual value derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="getInputFileBasedOutputFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the outfile name based on a given name and the input file name. If
+ the map input file does not exist (i.e. this is not for a map only job),
+ the given name is returned unchanged. If the config value for
+ "num.of.trailing.legs.to.use" is not set, or is set to 0 or a negative value, the given
+ name is returned unchanged. Otherwise, return a file name consisting of the
+ N trailing legs of the input file name where N is the config value for
+ "num.of.trailing.legs.to.use".
+
+ @param job
+ the job config
+ @param name
+ the output file name
+ @return the outfile name based on a given name and the input file name.]]>
+ </doc>
+ </method>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param fs
+ the file system to use
+ @param job
+ a job conf object
+ @param name
+ the name of the file over which a record writer object will be
+ constructed
+ @param arg3
+ a progressable object
+ @return A RecordWriter object over the given file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This abstract class extends the OutputFormatBase, allowing the output
+ data to be written to different output files. There are three basic use cases
+ for this class.
+
+ Case one: This class is used for a map reduce job with at least one reducer.
+ The reducer wants to write data to different files depending on the actual
+ keys. It is assumed that a key (or value) encodes the actual key (value)
+ and the desired location for the actual key (value).
+
+ Case two: This class is used for a map only job. The job wants to use an
+ output file name that is either a part of the input file name of the input
+ data, or some derivation of it.
+
+ Case three: This class is used for a map only job. The job wants to use an
+ output file name that depends on both the keys and the input file name.
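+
+ A minimal sketch of case one, using a hypothetical subclass that routes each
+ record to an output file named after its key:
+ <pre>
+ public class KeyPartitionedOutput
+     extends MultipleTextOutputFormat&lt;Text, Text&gt; {
+   protected String generateFileNameForKeyValue(Text key, Text value,
+                                                String name) {
+     return key.toString() + "/" + name;  // one directory per key
+   }
+ }
+ </pre>]]>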
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <class name="MultipleSequenceFileOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleSequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends the MultipleOutputFormat, allowing the output data
+ to be written to different output files in sequence file output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <class name="MultipleTextOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleTextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends the MultipleOutputFormat, allowing the output
+ data to be written to different output files in Text output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
+ <class name="MultithreadedMapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MultithreadedMapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Multithreaded implementation of {@link org.apache.hadoop.mapred.MapRunnable}.
+ <p>
+ It can be used instead of the default implementation,
+ {@link org.apache.hadoop.mapred.MapRunner}, to improve throughput when the
+ Map operation is not CPU bound.
+ <p>
+ Map implementations using this MapRunnable must be thread-safe.
+ <p>
+ The Map-Reduce job has to be configured to use this MapRunnable class (using
+ the JobConf.setMapRunnerClass method), and the number of threads the
+ thread-pool can use is set with the
+ <code>mapred.map.multithreadedrunner.threads</code> property; its default
+ value is 10 threads.
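+ <p>
+ A minimal configuration sketch (<tt>MyJob</tt> is a placeholder driver class):
+ <pre>
+ JobConf job = new JobConf(MyJob.class);
+ job.setMapRunnerClass(MultithreadedMapRunner.class);
+ job.setInt("mapred.map.multithreadedrunner.threads", 20);
+ </pre>]]>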
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
+ <!-- start class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <class name="NullOutputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="NullOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[Consume all outputs and put them in /dev/null.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <class name="RegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="RegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+ <class name="TokenCountMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="TokenCountMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that maps text values into &lt;token,freq&gt; pairs. Uses
+ {@link StringTokenizer} to break text into tokens.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+ <doc>
+ <![CDATA[<p>Library of generally useful mappers, reducers, and partitioners.</p>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.mapred.lib.aggregate">
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+ <class name="DoubleValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="DoubleValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a double value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="double"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a double value.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getSum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up a sequence of double
+ values.
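+
+ A minimal usage sketch:
+ <pre>
+ DoubleValueSum sum = new DoubleValueSum();
+ sum.addNextValue(1.5);
+ sum.addNextValue("2.5");    // any object whose toString() parses as a double
+ double d = sum.getSum();    // 4.0
+ String s = sum.getReport(); // "4.0"
+ </pre>]]>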
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <class name="LongValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the maximum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <class name="LongValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the minimum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <class name="LongValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getSum" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <class name="StringValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the largest
+ (lexicographically greatest) of a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <class name="StringValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the smallest
+ (lexicographically least) of a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+ <class name="UniqValueCount" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="UniqValueCount"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UniqValueCount" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.
+ @param maxNum the limit on the number of unique values to keep.]]>
+ </doc>
+ </constructor>
+ <method name="setMaxItems" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <doc>
+ <![CDATA[Set the limit on the number of unique values
+ @param n the desired limit on the number of unique values
+ @return the new limit on the number of unique values]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the number of unique objects aggregated]]>
+ </doc>
+ </method>
+ <method name="getUniqueItems" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the set of the unique objects]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of the unique objects. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that dedupes a sequence of objects.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+ <class name="UserDefinedValueAggregatorDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="UserDefinedValueAggregatorDescriptor" type="java.lang.String, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param className the class name of the user-defined descriptor class
+ @param job a configuration object used for descriptor configuration]]>
+ </doc>
+ </constructor>
+ <method name="createInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="className" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create an instance of the given class
+ @param className the name of the class
+ @return a dynamically created instance of the given class]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pairs
+ by delegating the invocation to the real object.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a wrapper for a user-defined value aggregator descriptor.
+ It serves two functions: one is to create an object of ValueAggregatorDescriptor from the
+ name of a user-defined class that may be dynamically loaded; the other is to
+ delegate invocations of the generateKeyValPairs function to the created object.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+ <interface name="ValueAggregator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val the value to be added]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of values as the outputs of the combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface defines the minimal protocol for value aggregators.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+ <class name="ValueAggregatorBaseDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="ValueAggregatorBaseDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="generateEntry" return="java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <param name="id" type="java.lang.String"/>
+ <param name="val" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @param id the aggregation id
+ @param val the val associated with the id to be aggregated
+ @return an Entry whose key is the aggregation id prefixed with
+ the aggregation type.]]>
+ </doc>
+ </method>
+ <method name="generateValueAggregator" return="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @return a value aggregator of the given type.]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate 1 or 2 aggregation-id/value pairs for the given key/value pair.
+ The first id will be of type LONG_VALUE_SUM, with "record_count" as
+ its aggregation id. If the input is a file split,
+ the second id of the same type will be generated too, with the file name
+ as its aggregation id. This achieves the behavior of counting the total number
+ of records in the input data, and the number of records in each input file.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the input file name from the job configuration.
+
+ @param job a job configuration object]]>
+ </doc>
+ </method>
+ <field name="UNIQ_VALUE_COUNT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VALUE_HISTOGRAM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputFile" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements the common functionality shared by
+ the subclasses of ValueAggregatorDescriptor.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <class name="ValueAggregatorCombiner" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorCombiner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[The combiner does not need any configuration; this method does nothing.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Combines values for a given key.
+ @param key the key is expected to be a Text object, whose prefix indicates
+ the type of aggregation to aggregate the values.
+ @param values the values to combine
+ @param output to collect combined values]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic combiner of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+ <interface name="ValueAggregatorDescriptor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pair.
+ This function is usually called by the mapper of an Aggregate based job.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configure the object
+
+ @param job
+ a JobConf object that may contain the information that can be used
+ to configure the object.]]>
+ </doc>
+ </method>
+ <field name="TYPE_SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ONE" type="org.apache.hadoop.io.Text"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This interface defines the contract a value aggregator descriptor must
+ support. Such a descriptor can be configured with a JobConf object. Its main
+ function is to generate a list of aggregation-id/value pairs. An aggregation
+ id encodes an aggregation type which is used to guide the way to aggregate
+ the value in the reduce/combiner phase of an Aggregate based job. The mapper in
+ an Aggregate based map/reduce job may create one or more
+ ValueAggregatorDescriptor objects at configuration time. For each input
+ key/value pair, the mapper will use those objects to create aggregation
+ id/value pairs.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+ <class name="ValueAggregatorJob" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorJob"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation. Generic hadoop
+ arguments are accepted.
+ @return a JobConf object ready for submission.
+
+ @throws IOException
+ @see GenericOptionsParser]]>
+ </doc>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setAggregatorDescriptors"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and run an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the main class for creating a map/reduce job using the Aggregate
+ framework. Aggregate is a specialization of the map/reduce framework for
+ performing various simple aggregations.
+
+ Generally speaking, in order to implement an application using the Map/Reduce
+ model, the developer needs to implement Map and Reduce functions (and possibly
+ a Combine function). However, a lot of applications related to counting and
+ statistics computing have very similar characteristics. Aggregate abstracts
+ out the general patterns of these functions and implements those patterns.
+ In particular, the package provides generic mapper/reducer/combiner classes,
+ a set of built-in value aggregators, and a generic utility class that
+ helps the user create map/reduce jobs using the generic classes. The built-in
+ aggregators include:
+
+ sum over numeric values; count of distinct values; histogram of values;
+ minimum, maximum, median, average, and standard deviation of numeric values.
+
+ The developer using Aggregate need only provide a plugin class
+ conforming to the following interface:
+
+ public interface ValueAggregatorDescriptor { public ArrayList<Entry>
+ generateKeyValPairs(Object key, Object value); public void
+ configure(JobConf job); }
+
+ The package also provides a base class, ValueAggregatorBaseDescriptor,
+ implementing the above interface. The user can extend the base class and
+ implement generateKeyValPairs accordingly.
+
+ The primary work of generateKeyValPairs is to emit one or more key/value
+ pairs based on the input key/value pair. The key in an output key/value pair
+ encodes two pieces of information: the aggregation type and the aggregation id.
+ The value will be aggregated onto the aggregation id according to the
+ aggregation type.
+
+ This class offers a function to generate a map/reduce job using the Aggregate
+ framework. The function takes the following parameters: the input directory
+ spec, the input format (text or sequence file), the output directory, and a
+ file specifying the user plugin class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <class name="ValueAggregatorJobBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K1, V1, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="ValueAggregatorJobBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="logSpec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="aggregatorDescriptorList" type="java.util.ArrayList&lt;org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This abstract class implements some common functionality of the
+ generic mapper, reducer and combiner classes of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <class name="ValueAggregatorMapper" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The map function. It iterates through the value aggregator descriptor
+ list to generate aggregation id/value pairs and emit them.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="org.apache.hadoop.io.Text"/>
+ <param name="arg1" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic mapper of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <class name="ValueAggregatorReducer" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param key
+ the key is expected to be a Text object, whose prefix indicates
+ the type of aggregation to apply to the values. In effect, data-
+ driven computing is achieved. It is assumed that each aggregator's
+ getReport method emits appropriate output for the aggregator. This
+ may be further customized.
+ @param values the values to be aggregated]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic reducer of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+ <class name="ValueHistogram" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="ValueHistogram"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add the given val to the aggregator.
+
+ @param val the value to be added. It is expected to be a string
+ in the form of xxxx\tnum, meaning xxxx has num occurrences (for example, "apple\t3" means "apple" occurred three times).]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this aggregator.
+ It includes the following basic statistics of the histogram:
+ the number of unique values
+ the minimum value
+ the median value
+ the maximum value
+ the average value
+ the standard deviation]]>
+ </doc>
+ </method>
+ <method name="getReportDetails" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a string representation of the list of value/frequency pairs of
+ the histogram]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a list of value/frequency pairs.
+ The return value is expected to be used by the reducer.]]>
+ </doc>
+ </method>
+ <method name="getReportItems" return="java.util.TreeMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a TreeMap representation of the histogram]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that computes the
+ histogram of a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+ <doc>
+ <![CDATA[Classes for performing various counting and aggregations.
+<p />
+<h2><a name="Aggregate"></a>Aggregate framework </h2>
+<p />
+Generally speaking, in order to implement an application using the Map/Reduce
+model, the developer needs to implement Map and Reduce functions (and possibly
+a Combine function). However, for a lot of applications related to counting and
+statistics computing, these functions have very similar
+characteristics. This package implements
+those patterns. In particular, the package provides a generic mapper class,
+a reducer class and a combiner class, and a set of built-in value aggregators.
+It also provides a generic utility class, ValueAggregatorJob, that offers a static function that
+creates map/reduce jobs:
+<blockquote>
+<pre>
+public static JobConf createValueAggregatorJob(String args&#91;]) throws IOException;
+</pre>
+</blockquote>
+To call this function, the user needs to pass in arguments specifying the input directories, the output directory,
+the number of reducers, the input data format (textinputformat or sequencefileinputformat), and a file specifying the user plugin class(es) for the mapper to load.
+A user plugin class is responsible for specifying what
+aggregators to use and what values are for which aggregators.
+A plugin class must implement the following interface:
+<blockquote>
+<pre>
+ public interface ValueAggregatorDescriptor {
+ public ArrayList&#60;Entry&#62; generateKeyValPairs(Object key, Object value);
+ public void configure(JobConf job);
+}
+</pre>
+</blockquote>
+Function generateKeyValPairs will generate aggregation key/value pairs for the
+input key/value pair. Each aggregation key encodes two pieces of information: the aggregation type and aggregation ID.
+The value is the value to be aggregated onto the aggregation ID according to the aggregation type. Here
+is a simple example user plugin class for counting the words in the input texts:
+<blockquote>
+<pre>
+public class WordCountAggregatorDescriptor extends ValueAggregatorBaseDescriptor {
+ public ArrayList&#60;Entry&#62; generateKeyValPairs(Object key, Object val) {
+ String words &#91;] &#61; val.toString().split(&#34; &#124;\t&#34;);
+ ArrayList&#60;Entry&#62; retv &#61; new ArrayList&#60;Entry&#62;();
+ for (int i &#61; 0; i &#60; words.length; i++) {
+ retv.add(generateEntry(LONG&#95;VALUE&#95;SUM, words&#91;i], ONE));
+ }
+ return retv;
+ }
+ public void configure(JobConf job) {}
+}
+</pre>
+</blockquote>
+In the above code, LONG_VALUE_SUM is a string denoting the aggregation type LongValueSum, which sums over long values.
+ONE denotes the string "1". Function generateEntry(LONG_VALUE_SUM, words[i], ONE) will interpret the first argument as an aggregation type, the second as an aggregation ID, and the third argument as the value to be aggregated. The output key will look like "LongValueSum:xxxx", where xxxx is the string value of words[i], and the output value will be "1". The mapper will call generateKeyValPairs(Object key, Object val) for each input key/value pair to generate the desired aggregation id/value pairs. For example, for the input line "hello world hello", the mapper emits ("LongValueSum:hello", "1") twice and ("LongValueSum:world", "1") once.
+The downstream combiner/reducer will interpret each such pair as adding one to the aggregator xxxx.
+<p />
+Class ValueAggregatorBaseDescriptor is a base class that user plugin classes can extend. Here is the XML fragment specifying the user plugin class:
+<blockquote>
+<pre>
+&#60;property&#62;
+ &#60;name&#62;aggregator.descriptor.num&#60;/name&#62;
+ &#60;value&#62;1&#60;/value&#62;
+&#60;/property&#62;
+&#60;property&#62;
+ &#60;name&#62;aggregator.descriptor.0&#60;/name&#62;
+ &#60;value&#62;UserDefined,org.apache.hadoop.mapred.lib.aggregate.examples.WordCountAggregatorDescriptor&#60;/value&#62;
+&#60;/property&#62;
+</pre>
+</blockquote>
+Class ValueAggregatorBaseDescriptor itself provides a default implementation for generateKeyValPairs:
+<blockquote>
+<pre>
+public ArrayList&#60;Entry&#62; generateKeyValPairs(Object key, Object val) {
+ ArrayList&#60;Entry&#62; retv &#61; new ArrayList&#60;Entry&#62;();
+ String countType &#61; LONG&#95;VALUE&#95;SUM;
+ String id &#61; &#34;record&#95;count&#34;;
+ retv.add(generateEntry(countType, id, ONE));
+ return retv;
+}
+</pre>
+</blockquote>
+Thus, if no user plugin class is specified, the default behavior of the map/reduce job is to count the number of records (lines) in the input files.
+<p />
+During runtime, the mapper will invoke the generateKeyValPairs function for each input key/value pair, and emit the generated
+key/value pairs:
+<blockquote>
+<pre>
+public void map(WritableComparable key, Writable value,
+ OutputCollector output, Reporter reporter) throws IOException {
+ Iterator iter &#61; this.aggregatorDescriptorList.iterator();
+ while (iter.hasNext()) {
+ ValueAggregatorDescriptor ad &#61; (ValueAggregatorDescriptor) iter.next();
+ Iterator&#60;Entry&#62; ens &#61; ad.generateKeyValPairs(key, value).iterator();
+ while (ens.hasNext()) {
+ Entry en &#61; ens.next();
+ output.collect((WritableComparable)en.getKey(), (Writable)en.getValue());
+ }
+ }
+}
+</pre>
+</blockquote>
+The reducer will create an aggregator object for each key/value list pair, and perform the appropriate aggregation.
+At the end, it will emit the aggregator's results:
+<blockquote>
+<pre>
+public void reduce(WritableComparable key, Iterator values,
+ OutputCollector output, Reporter reporter) throws IOException {
+ String keyStr &#61; key.toString();
+ int pos &#61; keyStr.indexOf(ValueAggregatorDescriptor.TYPE&#95;SEPARATOR);
+ String type &#61; keyStr.substring(0,pos);
+ keyStr &#61; keyStr.substring(pos+ValueAggregatorDescriptor.TYPE&#95;SEPARATOR.length());
+ ValueAggregator aggregator &#61;
+ ValueAggregatorBaseDescriptor.generateValueAggregator(type);
+ while (values.hasNext()) {
+ aggregator.addNextValue(values.next());
+ }
+ String val &#61; aggregator.getReport();
+ key &#61; new Text(keyStr);
+ output.collect(key, new Text(val));
+}
+</pre>
+</blockquote>
+In order to be able to use a combiner, all the aggregators used must be associative and commutative.
+The following types are supported (a short usage sketch follows the list): <ul>
+<li> LongValueSum: sum over long values
+</li> <li> DoubleValueSum: sum over float/double values
+</li> <li> UniqValueCount: count the number of distinct values
+</li> <li> ValueHistogram: compute the histogram of values, along with the minimum, maximum, median, average, and standard deviation of numeric values
+</li></ul>
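+<p />
+Each built-in aggregator implements the ValueAggregator interface: addNextValue, getReport,
+getCombinerOutput, and reset. The following is a minimal sketch that drives one of the
+built-in aggregators directly, outside a job, to illustrate this protocol; the values fed
+in are placeholders:
+<blockquote>
+<pre>
+LongValueSum sum &#61; new LongValueSum();
+// feed string-encoded long values, as the combiner/reducer would
+sum.addNextValue(&#34;1&#34;);
+sum.addNextValue(&#34;1&#34;);
+// "2": the string representation of the aggregated value
+String report &#61; sum.getReport();
+// a one-element list holding the partial aggregate, to be re-emitted by a combiner
+ArrayList&#60;String&#62; partial &#61; sum.getCombinerOutput();
+sum.reset(); // clear the aggregator before aggregating the next key
+</pre>
+</blockquote>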
+<p />
+<h2><a name="Create_and_run"></a> Create and run an application </h2>
+<p />
+To create an application, the user needs to do the following things:
+<p />
+1. Implement a user plugin:
+<blockquote>
+<pre>
+import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor;
+import org.apache.hadoop.mapred.JobConf;
+
+public class WordCountAggregatorDescriptor extends ValueAggregatorBaseDescriptor {
+ public ArrayList&#60;Entry&#62; generateKeyValPairs(Object key, Object val) {
+ String words &#91;] &#61; val.toString().split(&#34; &#124;\t&#34;);
+ ArrayList&#60;Entry&#62; retv &#61; new ArrayList&#60;Entry&#62;();
+ for (int i &#61; 0; i &#60; words.length; i++) {
+ retv.add(generateEntry(LONG&#95;VALUE&#95;SUM, words&#91;i], ONE));
+ }
+ return retv;
+ }
+ public void configure(JobConf job) {}
+}
+</pre>
+</blockquote>
+
+2. Create an XML file specifying the user plugin (see the aggregator.descriptor properties shown above).
+<p />
+3. Compile your Java class and create a jar file, say wc.jar.
+
+<p />
+Finally, run the job:
+<blockquote>
+<pre>
+ hadoop jar wc.jar org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob indirs outdir numofreducers textinputformat|sequencefileinputformat spec_file
+</pre>
+</blockquote>
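+<p />
+The job can also be created and submitted programmatically through
+ValueAggregatorJob.createValueAggregatorJob. A minimal driver sketch follows;
+the argument values mirror the command line above and are placeholders:
+<blockquote>
+<pre>
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob;
+
+public class WordCountDriver {
+ public static void main(String&#91;] args) throws Exception {
+ // positional arguments: indirs outdir numofreducers inputformat spec_file
+ JobConf job &#61; ValueAggregatorJob.createValueAggregatorJob(new String&#91;] {
+ &#34;indirs&#34;, &#34;outdir&#34;, &#34;2&#34;, &#34;textinputformat&#34;, &#34;spec&#95;file&#34; });
+ JobClient.runJob(job); // submit the job and wait for its completion
+ }
+}
+</pre>
+</blockquote>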
+<p />]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.mapred.pipes">
+ <!-- start class org.apache.hadoop.mapred.pipes.Submitter -->
+ <class name="Submitter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Submitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExecutable" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the URI of the application's executable.
+ @param conf
+ @return the URI where the application's executable is located]]>
+ </doc>
+ </method>
+ <method name="setExecutable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="executable" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the URI for the application's executable. Normally this is an hdfs:
+ location.
+ @param conf
+ @param executable The URI of the application's executable.]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job is using a Java RecordReader.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordReader" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java RecordReader
+ @param conf the configuration to check
+ @return is it a Java RecordReader?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Mapper is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaMapper" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Mapper.
+ @param conf the configuration to check
+ @return is it a Java Mapper?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaReducer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Reducer is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaReducer" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Reducer.
+ @param conf the configuration to check
+ @return is it a Java Reducer?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job will use a Java RecordWriter.
+ @param conf the configuration to modify
+ @param value the new value to set]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordWriter" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Will the reduce use a Java RecordWriter?
+ @param conf the configuration to check
+ @return true, if the output of the job will be written by Java]]>
+ </doc>
+ </method>
+ <method name="getKeepCommandFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Does the user want to keep the command file for debugging? If this is
+ true, pipes will write a copy of the command data to a file in the
+ task directory named "downlink.data", which may be used to run the C++
+ program under the debugger. You probably also want to set
+ JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
+ being deleted.
+ To run using the data file, set the environment variable
+ "hadoop.pipes.command.file" to point to the file.
+ @param conf the configuration to check
+ @return will the framework save the command file?]]>
+ </doc>
+ </method>
+ <method name="setKeepCommandFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether to keep the command file for debugging
+ @param conf the configuration to modify
+ @param keep the new value]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Submit a pipes job based on the command line arguments.
+ @param args the command line arguments]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[The main entry point and job submitter. It may be used either from the
+ command line or through the API to launch Pipes jobs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.pipes.Submitter -->
+ <doc>
+ <![CDATA[Hadoop Pipes allows C++ code to use Hadoop DFS and map/reduce. The
+primary approach is to split the C++ code into a separate process that
+runs the application-specific code. In many ways, the approach is
+similar to Hadoop streaming, but it uses Writable serialization to
+convert the types into bytes that are sent to the process via a
+socket.
+
+<p>
+
+The class org.apache.hadoop.mapred.pipes.Submitter has a public static
+method to submit a job as a JobConf and a main method that takes an
+application and optional configuration file, input directories, and
+output directory. The command-line interface for the main method looks like:
+
+<pre>
+bin/hadoop pipes \
+ [-conf <i>path</i>] \
+ [-input <i>inputDir</i>] \
+ [-output <i>outputDir</i>] \
+ [-jar <i>applicationJarFile</i>] \
+ [-inputformat <i>class</i>] \
+ [-map <i>class</i>] \
+ [-partitioner <i>class</i>] \
+ [-reduce <i>class</i>] \
+ [-writer <i>class</i>] \
+ [-program <i>program url</i>]
+</pre>
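+
+<p>
+
+For API-based submission, a minimal sketch might look like this (it assumes a
+JobConf already configured with the input, output, and C++ executable
+settings listed above):
+
+<pre>
+JobConf conf = new JobConf();
+// ... set the input/output paths and the pipes executable ...
+Submitter.setIsJavaRecordWriter(conf, false); // the C++ program writes the output
+Submitter.setKeepCommandFile(conf, true);     // keep downlink.data for debugging
+RunningJob job = Submitter.submitJob(conf);   // note: conf is modified
+</pre>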
+
+<p>
+
+The application programs link against a thin C++ wrapper library that
+handles the communication with the rest of the Hadoop system. The C++
+interface is "swigable" so that interfaces can be generated for python
+and other scripting languages. All of the C++ functions and classes
+are in the HadoopPipes namespace. The job may consist of any
+combination of Java and C++ RecordReaders, Mappers, Partitioners,
+Combiners, Reducers, and RecordWriters.
+
+<p>
+
+Hadoop Pipes has generic Java classes for handling the mapper and
+reducer (PipesMapRunner and PipesReducer). They fork off the
+application program and communicate with it over a socket. The
+communication is handled by the C++ wrapper library and the
+PipesMapRunner and PipesReducer.
+
+<p>
+
+The application program passes in a factory object that can create
+the various objects needed by the framework to the runTask
+function. The framework creates the Mapper or Reducer as
+appropriate and calls the map or reduce method to invoke the
+application's code. The JobConf is available to the application.
+
+<p>
+
+The Mapper and Reducer objects get all of their inputs, outputs, and
+context via context objects. The advantage of using the context
+objects is that their interface can be extended with additional
+methods without breaking clients. Although this interface is different
+from the current Java interface, the plan is to migrate the Java
+interface in this direction.
+
+<p>
+
+Although the Java implementation is typed, the C++ interface treats keys
+and values as plain byte buffers. Since STL strings provide precisely
+the right functionality and are standard, they are used. The
+decision not to use stronger types was made to simplify the interface.
+
+<p>
+
+The application can also define combiner functions. The combiner will
+be run locally by the framework in the application process to avoid
+the round trip to the Java process and back. Because the compare
+function is not available in C++, the combiner will use memcmp to
+sort the inputs to the combiner. This is not as general as the Java
+equivalent, which uses the user's comparator, but should cover the
+majority of the use cases. As the map function outputs key/value
+pairs, they will be buffered. When the buffer is full, it will be
+sorted and passed to the combiner. The output of the combiner will be
+sent to the Java process.
+
+<p>
+
+The application can also set a partition function to control which
+reduce each key is sent to. If a partition function is not
+defined, the Java one will be used. The partition function will be
+called by the C++ framework before the key/value pair is sent back to
+Java.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.metrics">
+ <!-- start class org.apache.hadoop.metrics.ContextFactory -->
+ <class name="ContextFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ContextFactory"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of ContextFactory]]>
+ </doc>
+ </constructor>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the named attribute, or null if there is no
+ attribute of that name.
+
+ @param attributeName the attribute name
+ @return the attribute value]]>
+ </doc>
+ </method>
+ <method name="getAttributeNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all the factory's attributes.
+
+ @return the attribute names]]>
+ </doc>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Sets the named factory attribute to the specified value, creating it
+ if it does not already exist. If the value is null, this is the same as
+ calling removeAttribute.
+
+ @param attributeName the attribute name
+ @param value the new attribute value]]>
+ </doc>
+ </method>
+ <method name="removeAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes the named attribute if it exists.
+
+ @param attributeName the attribute name]]>
+ </doc>
+ </method>
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <exception name="InstantiationException" type="java.lang.InstantiationException"/>
+ <exception name="IllegalAccessException" type="java.lang.IllegalAccessException"/>
+ <doc>
+ <![CDATA[Returns the named MetricsContext instance, constructing it if necessary
+ using the factory's current configuration attributes. <p/>
+
+ When constructing the instance, if the factory property
+ <code><i>contextName</i>.class</code> exists,
+ its value is taken to be the name of the class to instantiate. Otherwise,
+ the default is to create an instance of
+ <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a
+ dummy "no-op" context which will cause all metric data to be discarded.
+
+ @param contextName the name of the context
+ @return the named MetricsContext]]>
+ </doc>
+ </method>
+ <method name="getNullContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a "null" context - one which does nothing.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the singleton ContextFactory instance, constructing it if
+ necessary. <p/>
+
+ When the instance is constructed, this method checks if the file
+ <code>hadoop-metrics.properties</code> exists on the class path. If it
+ exists, it must be in the format defined by java.util.Properties, and all
+ the properties in the file are set as attributes on the newly created
+ ContextFactory instance.
+
+ @return the singleton ContextFactory instance]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factory class for creating MetricsContext objects. To obtain an instance
+ of this class, use the static <code>getFactory()</code> method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ContextFactory -->
+ <!-- start interface org.apache.hadoop.metrics.MetricsContext -->
+ <interface name="MetricsContext" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.
+
+ @return the context name]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Starts or restarts monitoring, that is, the emitting of metrics records
+ as they are updated.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free any data that the implementation
+ may have buffered for sending at the next timer event. It
+ is OK to call <code>startMonitoring()</code> again after calling
+ this.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and also frees any buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new MetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at regular time intervals, as
+ determined by the implementation-class specific configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records and then return]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_PERIOD" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default period in seconds at which data is sent to the metrics system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The main interface to the metrics package.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsContext -->
+ <!-- start class org.apache.hadoop.metrics.MetricsException -->
+ <class name="MetricsException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException
+
+ @param message an error message]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[General-purpose, unchecked metrics exception.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsException -->
+ <!-- start interface org.apache.hadoop.metrics.MetricsRecord -->
+ <interface name="MetricsRecord" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value. The tagValue may be null,
+ which is treated the same as an empty String.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.
+
+ @param tagName name of a tag]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes, from the buffered data table, all rows having tags
+ that equal the tags that have been set on this record. For example,
+ if there are no tags on this record, all rows for this record name
+ would be removed. Or, if there is a single tag on this record, then
+ just rows containing a tag with the same name and value would be removed.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A named and optionally tagged set of records to be sent to the metrics
+ system. <p/>
+
+ A record name identifies the kind of data to be reported. For example, a
+ program reporting statistics relating to the disks on a computer might use
+ a record name "diskStats".<p/>
+
+ A record has zero or more <i>tags</i>. A tag has a name and a value. To
+ continue the example, the "diskStats" record might use a tag named
+ "diskName" to identify a particular disk. Sometimes it is useful to have
+ more than one tag, so there might also be a "diskType" with value "ide" or
+ "scsi" or whatever.<p/>
+
+ A record also has zero or more <i>metrics</i>. These are the named
+ values that are to be reported to the metrics system. In the "diskStats"
+ example, possible metric names would be "diskPercentFull", "diskPercentBusy",
+ "kbReadPerSecond", etc.<p/>
+
+ The general procedure for using a MetricsRecord is to fill in its tag and
+ metric values, and then call <code>update()</code> to pass the record to the
+ client library.
+ Metric data is not immediately sent to the metrics system
+ each time that <code>update()</code> is called.
+ An internal table is maintained, identified by the record name. This
+ table has columns
+ corresponding to the tag and the metric names, and rows
+ corresponding to each unique set of tag values. An update
+ either modifies an existing row in the table, or adds a new row with a set of
+ tag values that are different from all the other rows. Note that if there
+ are no tags, then there can be at most one row in the table. <p/>
+
+ Once a row is added to the table, its data will be sent to the metrics system
+ on every timer period, whether or not it has been updated since the previous
+ timer period. If this is inappropriate, for example if metrics were being
+ reported by some transient object in an application, the <code>remove()</code>
+ method can be used to remove the row and thus stop the data from being
+ sent.<p/>
+
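+ As a minimal sketch of this workflow (the record, tag, and metric names are
+ illustrative, and <code>context</code> is assumed to be a MetricsContext
+ obtained elsewhere):
+ <pre>
+ MetricsRecord diskStats = context.createRecord("diskStats");
+ diskStats.setTag("diskName", "sda");
+ diskStats.setMetric("diskPercentFull", 72.0f);
+ diskStats.update();  // buffered; sent on the next timer period
+ </pre>
+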
+ Note that the <code>update()</code> method is atomic. This means that it is
+ safe for different threads to be updating the same metric. More precisely,
+ it is OK for different threads to call <code>update()</code> on MetricsRecord instances
+ with the same set of tag names and tag values. Different threads should
+ <b>not</b> use the same MetricsRecord instance at the same time.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsRecord -->
+ <!-- start class org.apache.hadoop.metrics.MetricsUtil -->
+ <class name="MetricsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to return the named context.
+ If the desired context cannot be created for any reason, the exception
+ is logged, and a null context is returned.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Utility method to create and return a new metrics record instance within the
+ given context. This record is tagged with the host name.
+
+ @param context the context
+ @param recordName name of the record
+ @return newly created metrics record]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility class to simplify creation and reporting of hadoop metrics.
+
+ For examples of usage, see {@link org.apache.hadoop.dfs.DataNode}.
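+
+ A typical pattern might look like this (the context and record names are
+ illustrative):
+ <pre>
+ MetricsContext context = MetricsUtil.getContext("myContext");
+ MetricsRecord record = MetricsUtil.createRecord(context, "myRecord");
+ record.setMetric("myMetric", 1);
+ record.update();
+ </pre>
+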
+ @see org.apache.hadoop.metrics.MetricsRecord
+ @see org.apache.hadoop.metrics.MetricsContext
+ @see org.apache.hadoop.metrics.ContextFactory]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsUtil -->
+ <!-- start interface org.apache.hadoop.metrics.Updater -->
+ <interface name="Updater" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+      <![CDATA[Timer-based call-back from the metrics library.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Call-back interface. See <code>MetricsContext.registerUpdater()</code>.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.Updater -->
+ <doc>
+ <![CDATA[This package defines an API for reporting performance metric information.
+<p/>
+The API is abstract so that it can be implemented on top of
+a variety of metrics client libraries. The choice of
+client library is a configuration option, and different
+modules within the same application can use
+different metrics implementation libraries.
+<p/>
+Sub-packages:
+<dl>
+ <dt><code>org.apache.hadoop.metrics.spi</code></dt>
+  <dd>The abstract Service Provider Interface package. Those wishing to
+ integrate the metrics API with a particular metrics client library should
+ extend this package.</dd>
+
+ <dt><code>org.apache.hadoop.metrics.file</code></dt>
+ <dd>An implementation package which writes the metric data to
+ a file, or sends it to the standard output stream.</dd>
+
+ <dt> <code>org.apache.hadoop.metrics.ganglia</code></dt>
+ <dd>An implementation package which sends metric data to
+ <a href="http://ganglia.sourceforge.net/">Ganglia</a>.</dd>
+</dl>
+
+<h3>Introduction to the Metrics API</h3>
+
+Here is a simple example of how to use this package to report a single
+metric value:
+<pre>
+ private ContextFactory contextFactory = ContextFactory.getFactory();
+
+ void reportMyMetric(float myMetric) {
+ MetricsContext myContext = contextFactory.getContext("myContext");
+        MetricsRecord myRecord = myContext.createRecord("myRecord");
+ myRecord.setMetric("myMetric", myMetric);
+ myRecord.update();
+ }
+</pre>
+
+In this example there are three names:
+<dl>
+ <dt><i>myContext</i></dt>
+  <dd>The context name will typically identify either the application or a
+  module within an application or library.</dd>
+
+ <dt><i>myRecord</i></dt>
+ <dd>The record name generally identifies some entity for which a set of
+ metrics are to be reported. For example, you could have a record named
+ "cacheStats" for reporting a number of statistics relating to the usage of
+ some cache in your application.</dd>
+
+ <dt><i>myMetric</i></dt>
+ <dd>This identifies a particular metric. For example, you might have metrics
+ named "cache_hits" and "cache_misses".
+ </dd>
+</dl>
+
+<h3>Tags</h3>
+
+In some cases it is useful to have multiple records with the same name. For
+example, suppose that you want to report statistics about each disk on a computer.
+In this case, the record name would be something like "diskStats", but you also
+need to identify the disk, which is done by adding a <i>tag</i> to the record.
+The code could look something like this:
+<pre>
+ private MetricsRecord diskStats =
+        contextFactory.getContext("myContext").createRecord("diskStats");
+
+ void reportDiskMetrics(String diskName, float diskBusy, float diskUsed) {
+ diskStats.setTag("diskName", diskName);
+ diskStats.setMetric("diskBusy", diskBusy);
+ diskStats.setMetric("diskUsed", diskUsed);
+ diskStats.update();
+ }
+</pre>
+
+<h3>Buffering and Callbacks</h3>
+
+Data is not sent immediately to the metrics system when
+<code>MetricsRecord.update()</code> is called. Instead it is stored in an
+internal table, and the contents of the table are sent periodically.
+This can be important for two reasons:
+<ol>
+ <li>It means that a programmer is free to put calls to this API in an
+ inner loop, since updates can be very frequent without slowing down
+ the application significantly.</li>
+ <li>Some implementations can gain efficiency by combining many metrics
+ into a single UDP message.</li>
+</ol>
+
+The API provides a timer-based callback via the
+<code>registerUpdater()</code> method. The benefit of this
+versus using <code>java.util.Timer</code> is that the callbacks will be done
+immediately before sending the data, making the data as current as possible.
+
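+As a sketch of this callback style (the class name MyUpdater and the metric
+shown are illustrative, and <code>myRecord</code> is assumed to have been
+created as in the earlier examples):
+<pre>
+    class MyUpdater implements Updater {
+      public void doUpdates(MetricsContext context) {
+        // refresh metric values immediately before the data is sent
+        myRecord.setMetric("queueLength", currentQueueLength);
+        myRecord.update();
+      }
+    }
+    ...
+    myContext.registerUpdater(new MyUpdater());
+</pre>
+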
+<h3>Configuration</h3>
+
+It is possible to programmatically examine and modify configuration data
+before creating a context, like this:
+<pre>
+ ContextFactory factory = ContextFactory.getFactory();
+ ... examine and/or modify factory attributes ...
+ MetricsContext context = factory.getContext("myContext");
+</pre>
+The factory attributes can be examined and modified using the following
+<code>ContextFactory</code> methods:
+<ul>
+ <li><code>Object getAttribute(String attributeName)</code></li>
+ <li><code>String[] getAttributeNames()</code></li>
+ <li><code>void setAttribute(String name, Object value)</code></li>
+  <li><code>void removeAttribute(String attributeName)</code></li>
+</ul>
+
+<p/>
+<code>ContextFactory.getFactory()</code> initializes the factory attributes by
+reading the properties file <code>hadoop-metrics.properties</code> if it exists
+on the class path.
+
+<p/>
+A factory attribute named:
+<pre>
+<i>contextName</i>.class
+</pre>
+should have as its value the fully qualified name of the class to be
+instantiated by a call of the <code>ContextFactory</code> method
+<code>getContext(<i>contextName</i>)</code>. If this factory attribute is not
+specified, the default is to instantiate
+<code>org.apache.hadoop.metrics.spi.NullContext</code>, as described above.
+
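+For example, to route metrics for a context named "myContext" to the file
+implementation described above, the properties file could contain (the
+context name is illustrative):
+<pre>
+myContext.class=org.apache.hadoop.metrics.file.FileContext
+</pre>
+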
+<p/>
+Other factory attributes are specific to a particular implementation of this
+API and are documented elsewhere. For example, configuration attributes for
+the file and Ganglia implementations can be found in the javadoc for
+their respective packages.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.metrics.file">
+ <!-- start class org.apache.hadoop.metrics.file.FileContext -->
+ <class name="FileContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of FileContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="getFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the configured file name, or null.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Starts or restarts monitoring by opening, in append mode, the
+ file specified by the <code>fileName</code> attribute,
+ if specified. Otherwise the data will be written to standard
+ output.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring, closing the file.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Emits a metrics record to a file.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Flushes the output writer, forcing updates to disk.]]>
+ </doc>
+ </method>
+ <field name="FILE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="PERIOD_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Metrics context for writing metrics to a file.<p/>
+
+ This class is configured by setting ContextFactory attributes which in turn
+ are usually configured through a properties file. All the attributes are
+ prefixed by the contextName. For example, the properties file might contain:
+ <pre>
+ myContextName.fileName=/tmp/metrics.log
+ myContextName.period=5
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.file.FileContext -->
+ <doc>
+ <![CDATA[Implementation of the metrics package that writes the metrics to a file.
+Programmers should not normally need to use this package directly. Instead
+they should use org.apache.hadoop.metrics.
+
+<p/>
+These are the implementation specific factory attributes
+(See ContextFactory.getFactory()):
+
+<dl>
+ <dt><i>contextName</i>.fileName</dt>
+ <dd>The path of the file to which metrics in context <i>contextName</i>
+ are to be appended. If this attribute is not specified, the metrics
+ are written to standard output by default.</dd>
+
+ <dt><i>contextName</i>.period</dt>
+  <dd>The period in seconds at which the metric data is written to the
+ file.</dd>
+
+</dl>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.metrics.ganglia">
+ <!-- start class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+ <class name="GangliaContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GangliaContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of GangliaContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Context for sending metrics to Ganglia.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+ <doc>
+ <![CDATA[<!--
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+-->
+
+Implementation of the metrics package that sends metric data to
+<a href="http://ganglia.sourceforge.net/">Ganglia</a>.
+Programmers should not normally need to use this package directly. Instead
+they should use org.apache.hadoop.metrics.
+
+<p/>
+These are the implementation specific factory attributes
+(See ContextFactory.getFactory()):
+
+<dl>
+ <dt><i>contextName</i>.servers</dt>
+ <dd>Space and/or comma separated sequence of servers to which UDP
+ messages should be sent.</dd>
+
+ <dt><i>contextName</i>.period</dt>
+  <dd>The period in seconds at which the metric data is sent to the
+ server(s).</dd>
+
+ <dt><i>contextName</i>.units.<i>recordName</i>.<i>metricName</i></dt>
+ <dd>The units for the specified metric in the specified record.</dd>
+
+ <dt><i>contextName</i>.slope.<i>recordName</i>.<i>metricName</i></dt>
+ <dd>The slope for the specified metric in the specified record.</dd>
+
+ <dt><i>contextName</i>.tmax.<i>recordName</i>.<i>metricName</i></dt>
+ <dd>The tmax for the specified metric in the specified record.</dd>
+
+ <dt><i>contextName</i>.dmax.<i>recordName</i>.<i>metricName</i></dt>
+ <dd>The dmax for the specified metric in the specified record.</dd>
+
+</dl>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.metrics.jvm">
+ <!-- start class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <class name="EventCounter" extends="org.apache.log4j.AppenderSkeleton"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="EventCounter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getFatal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getError" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWarn" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfo" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="requiresLayout" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+      <![CDATA[A log4j Appender that simply counts logging events at four levels:
+ fatal, error, warn and info.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <!-- start class org.apache.hadoop.metrics.jvm.JvmMetrics -->
+ <class name="JvmMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="init" return="org.apache.hadoop.metrics.jvm.JvmMetrics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="processName" type="java.lang.String"/>
+ <param name="sessionId" type="java.lang.String"/>
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[This will be called periodically (with the period being configuration
+ dependent).]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Singleton class which reports Java Virtual Machine metrics to the metrics API.
+ Any application can obtain the instance via the static <code>init</code> method
+ in order to emit Java VM metrics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.JvmMetrics -->
+</package>
+<package name="org.apache.hadoop.metrics.spi">
+ <!-- start class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
+ <class name="AbstractMetricsContext" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsContext"/>
+ <constructor name="AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of AbstractMetricsContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ <doc>
+ <![CDATA[Initializes the context.]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for subclasses to access factory attributes.]]>
+ </doc>
+ </method>
+ <method name="getAttributeTable" return="java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="tableName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an attribute-value map derived from the factory attributes
+ by finding all factory attributes that begin with
+ <i>contextName</i>.<i>tableName</i>. The returned map consists of
+ those attributes with the contextName and tableName stripped off.]]>
+ </doc>
+ </method>
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.]]>
+ </doc>
+ </method>
+ <method name="getContextFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the factory by which this context was created.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Starts or restarts monitoring, that is, the emitting of metrics records.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free buffered data.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and frees buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Creates a new MetricsRecordImpl instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="newRecord" return="org.apache.hadoop.metrics.spi.MetricsRecordImpl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Subclasses should override this if they subclass MetricsRecordImpl.
+ @param recordName the name of the record
+ @return newly created instance of MetricsRecordImpl or subclass]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at time intervals determined by
+ the configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sends a record to the metrics system.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Called each period after all records have been emitted. This default
+ implementation does nothing; subclasses may override it in order to perform
+ some kind of flush.]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.update(). Creates or updates a row in
+ the internal table of metric data.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.remove(). Removes all matching rows in
+ the internal table of metric data. A row matches if it has the same
+ tag names and values as record, but it may also have additional
+ tags.]]>
+ </doc>
+ </method>
+ <method name="getPeriod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the timer period.]]>
+ </doc>
+ </method>
+ <method name="setPeriod"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="period" type="int"/>
+ <doc>
+      <![CDATA[Sets the timer period.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The main class of the Service Provider Interface. This class should be
+ extended in order to integrate the Metrics API with a specific metrics
+ client library. <p/>
+
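+ As a sketch, a minimal subclass that simply prints each record might look
+ like this (the class name and period are illustrative):
+ <pre>
+ public class PrintContext extends AbstractMetricsContext {
+   public void init(String contextName, ContextFactory factory) {
+     super.init(contextName, factory);
+     setPeriod(10);  // emit records every 10 seconds
+   }
+   protected void emitRecord(String contextName, String recordName,
+                             OutputRecord outRec) throws IOException {
+     System.out.println(contextName + "." + recordName + ": " + outRec);
+   }
+ }
+ </pre>
+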
+ This class implements the internal table of metric data, and the timer
+ on which data is to be sent to the metrics system. Subclasses must
+ override the abstract <code>emitRecord</code> method in order to transmit
+ the data. <p/>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
+ <!-- start class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
+ <class name="MetricsRecordImpl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsRecord"/>
+ <constructor name="MetricsRecordImpl" type="java.lang.String, org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Creates a new instance of MetricsRecordImpl]]>
+ </doc>
+ </constructor>
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes the row, if it exists, in the buffered data table having tags
+ that equal the tags that have been set on this record.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of MetricsRecord. Keeps a back-pointer to the context
+ from which it was created, and delegates back to it on <code>update</code>
+ and <code>remove()</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
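+ <!-- Illustrative usage sketch (not part of the generated API description).
+      A minimal example of working with a MetricsRecord; the context and
+      record names "myContext" and "myRecord" are hypothetical, and the
+      record is assumed to come from MetricsUtil:
+
+        MetricsContext context = MetricsUtil.getContext("myContext");
+        MetricsRecord record = MetricsUtil.createRecord(context, "myRecord");
+        record.setTag("host", "node1");      // identifies the row
+        record.incrMetric("requests", 1);    // accumulated until the next emit
+        record.update();                     // buffer the row for periodic sending
+ -->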
+ <!-- start class org.apache.hadoop.metrics.spi.MetricValue -->
+ <class name="MetricValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricValue" type="java.lang.Number, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricValue]]>
+ </doc>
+ </constructor>
+ <method name="isIncrement" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumber" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="ABSOLUTE" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCREMENT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Number that is either an absolute or an incremental amount.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricValue -->
+ <!-- start class org.apache.hadoop.metrics.spi.NullContext -->
+ <class name="NullContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContext]]>
+ </doc>
+ </constructor>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do-nothing version of startMonitoring]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Null metrics context: a metrics context which does nothing. Used as the
+ default context, so that no performance data is emitted if no configuration
+ data is found.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContext -->
+ <!-- start class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <class name="NullContextWithUpdateThread" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContextWithUpdateThread"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContextWithUpdateThread]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A null context which has a thread that calls back
+ periodically when monitoring is started, so that the data is kept
+ sampled correctly.
+ In all other respects, this is like the NULL context: no data is emitted.
+ This is suitable for monitoring systems such as JMX, which read the
+ metrics only when a client requests the data.
+
+ The default implementations of start and stop monitoring inherited
+ from AbstractMetricsContext are good enough.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <!-- start class org.apache.hadoop.metrics.spi.OutputRecord -->
+ <class name="OutputRecord" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTagNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of tag names]]>
+ </doc>
+ </method>
+ <method name="getTag" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a tag object, which can be a String, Integer, Short or Byte.
+
+ @return the tag value, or null if there is no such tag]]>
+ </doc>
+ </method>
+ <method name="getMetricNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of metric names.]]>
+ </doc>
+ </method>
+ <method name="getMetric" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the metric object which can be a Float, Integer, Short or Byte.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents a record of metric data to be sent to a metrics system.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.OutputRecord -->
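+ <!-- Illustrative usage sketch (not part of the generated API description).
+      How a context implementation might walk an OutputRecord inside
+      emitRecord; "outRec" is the parameter name used above:
+
+        for (String tagName : outRec.getTagNames()) {
+          Object tagValue = outRec.getTag(tagName);  // String, Integer, Short or Byte
+        }
+        for (String metricName : outRec.getMetricNames()) {
+          Number metricValue = outRec.getMetric(metricName);
+        }
+ -->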
+ <!-- start class org.apache.hadoop.metrics.spi.Util -->
+ <class name="Util" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="parse" return="java.util.List&lt;java.net.InetSocketAddress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="specs" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Parses a space and/or comma separated sequence of server specifications
+ of the form <i>hostname</i> or <i>hostname:port</i>. If
+ the specs string is null, defaults to localhost:defaultPort.
+
+ @return a list of InetSocketAddress objects.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Static utility methods]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.Util -->
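+ <!-- Illustrative usage sketch (not part of the generated API description).
+      Parsing a server list with Util.parse; the host names are made up:
+
+        List<InetSocketAddress> addrs =
+            Util.parse("host1.example.com:8649, host2.example.com", 8649);
+        // yields host1.example.com:8649 and host2.example.com:8649
+        List<InetSocketAddress> local = Util.parse(null, 8649);
+        // a null spec defaults to localhost:8649
+ -->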
+ <doc>
+ <![CDATA[The Service Provider Interface for the Metrics API. This package provides
+an interface allowing a variety of metrics reporting implementations to be
+plugged in to the Metrics API. Examples of such implementations can be found
+in the packages <code>org.apache.hadoop.metrics.file</code> and
+<code>org.apache.hadoop.metrics.ganglia</code>.<p/>
+
+Plugging in an implementation involves writing a concrete subclass of
+<code>AbstractMetricsContext</code>. The subclass should get its
+ configuration information using the <code>getAttribute(<i>attributeName</i>)</code>
+ method.]]>
+ </doc>
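+ <!-- Illustrative sketch (not part of the generated API description).
+      A minimal pluggable context along the lines described above; the
+      class name MyContext and the attribute name "server" are hypothetical:
+
+        public class MyContext extends AbstractMetricsContext {
+          private String server;
+          public void init(String contextName, ContextFactory factory) {
+            super.init(contextName, factory);
+            // reads e.g. myContext.server from the metrics configuration
+            server = getAttribute("server");
+          }
+          protected void emitRecord(String contextName, String recordName,
+                                    OutputRecord outRec) throws IOException {
+            // forward the record to the external metrics system here
+          }
+        }
+ -->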
+</package>
+<package name="org.apache.hadoop.metrics.util">
+ <!-- start class org.apache.hadoop.metrics.util.MBeanUtil -->
+ <class name="MBeanUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MBeanUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="registerMBean" return="javax.management.ObjectName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="serviceName" type="java.lang.String"/>
+ <param name="nameName" type="java.lang.String"/>
+ <param name="theMbean" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Register the MBean using our standard MBeanName format
+ "hadoop.dfs:service=<serviceName>,name=<nameName>",
+ where <serviceName> and <nameName> are the supplied parameters.
+
+ @param serviceName
+ @param nameName
+ @param theMbean - the MBean to register
+ @return the name used to register the MBean]]>
+ </doc>
+ </method>
+ <method name="unregisterMBean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mbeanName" type="javax.management.ObjectName"/>
+ </method>
+ <doc>
+ <![CDATA[This util class provides a method to register an MBean using
+ our standard naming convention as described in the doc
+ for {@link #registerMBean(String, String, Object)}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MBeanUtil -->
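+ <!-- Illustrative usage sketch (not part of the generated API description).
+      Registering and later unregistering an MBean; the service and name
+      strings are made up:
+
+        ObjectName beanName =
+            MBeanUtil.registerMBean("MyService", "MyServiceActivity", mbean);
+        // ... later, on shutdown:
+        MBeanUtil.unregisterMBean(beanName);
+ -->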
+ <!-- start class org.apache.hadoop.metrics.util.MetricsIntValue -->
+ <class name="MetricsIntValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsIntValue" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="int"/>
+ <doc>
+ <![CDATA[Set the value
+ @param newValue]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the mr.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsIntValue class is for a metric that is not time-varying
+ but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsIntValue -->
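+ <!-- Illustrative usage sketch (not part of the generated API description).
+      The metric name "capacity" is hypothetical:
+
+        MetricsIntValue capacity = new MetricsIntValue("capacity");
+        capacity.set(100);
+        capacity.pushMetric(record);  // pushed only if changed since the last push
+ -->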
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
+ <class name="MetricsTimeVaryingInt" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingInt" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+ <![CDATA[Increment the metric by the incr value
+ @param incr - number of operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Increment the metric by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalValue()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value at the previous interval
+ @return previous interval value]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingInt class is for a metric that naturally
+ varies over time (e.g. number of files created).
+ The metric is published at each interval heartbeat (the interval
+ is set in the metrics config file).
+ Note: if a time is to be associated with the metric, use
+ {@link org.apache.hadoop.metrics.util.MetricsTimeVaryingRate} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
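+ <!-- Illustrative usage sketch (not part of the generated API description).
+      The metric name "files_created" is hypothetical:
+
+        MetricsTimeVaryingInt filesCreated =
+            new MetricsTimeVaryingInt("files_created");
+        filesCreated.inc();               // one operation
+        filesCreated.inc(3);              // three more operations
+        filesCreated.pushMetric(record);  // pushes the delta for this interval
+ -->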
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
+ <class name="MetricsTimeVaryingRate" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingRate" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param n the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numOps" type="int"/>
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for numOps operations
+ @param numOps - number of operations
+ @param time - time for numOps operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for one operation
+ @param time for one operation]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and
+ {@link #getPreviousIntervalNumOps()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalNumOps" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of operations in the previous interval
+ @return - ops in prev interval]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalAverageTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The average time taken by an operation in the previous interval
+ @return - the average operation time.]]>
+ </doc>
+ </method>
+ <method name="getMinTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The min time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return min time for an operation]]>
+ </doc>
+ </method>
+ <method name="getMaxTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The max time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return max time for an operation]]>
+ </doc>
+ </method>
+ <method name="resetMinMax"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the min max values]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingRate class is for a rate based metric that
+ naturally varies over time (e.g. time taken to create a file).
+ The rate is averaged at each interval heartbeat (the interval
+ is set in the metrics config file).
+ This class also keeps track of the min and max rates along with
+ a method to reset the min-max.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
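+ <!-- Illustrative usage sketch (not part of the generated API description).
+      Timing an operation; the metric name "create" and doCreate() are
+      hypothetical:
+
+        MetricsTimeVaryingRate createTime = new MetricsTimeVaryingRate("create");
+        long start = System.currentTimeMillis();
+        doCreate();
+        createTime.inc(System.currentTimeMillis() - start);
+        createTime.pushMetric(record);  // average time & op count for the interval
+ -->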
+</package>
+<package name="org.apache.hadoop.net">
+ <!-- start class org.apache.hadoop.net.DNS -->
+ <class name="DNS" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DNS"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reverseDns" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hostIp" type="java.net.InetAddress"/>
+ <param name="ns" type="java.lang.String"/>
+ <exception name="NamingException" type="javax.naming.NamingException"/>
+ <doc>
+ <![CDATA[Returns the hostname associated with the specified IP address by the
+ provided nameserver.
+
+ @param hostIp
+ The address to reverse lookup
+ @param ns
+ The host name of a reachable DNS server
+ @return The host name associated with the provided IP
+ @throws NamingException
+ If a NamingException is encountered]]>
+ </doc>
+ </method>
+ <method name="getIPs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the IPs associated with the provided interface, if any, in
+ textual form.
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return A string vector of all the IPs associated with the provided
+ interface
+ @throws UnknownHostException
+ If an UnknownHostException is encountered in querying the
+ default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultIP" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the first available IP address associated with the provided
+ network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The IP address in text form
+ @throws UnknownHostException
+ If one is encountered in querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the provided nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return A string vector of all host names associated with the IPs tied to
+ the specified interface
+ @throws UnknownHostException]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the default nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The list of host names associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the provided
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the default
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides direct and reverse lookup functionalities, allowing
+ the querying of specific network interfaces or nameservers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.DNS -->
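+ <!-- Illustrative usage sketch (not part of the generated API description).
+      The interface name "eth0" and the nameserver host are made up:
+
+        String host = DNS.getDefaultHost("eth0");   // first host name on eth0
+        String[] ips = DNS.getIPs("eth0");          // all IPs on eth0
+        String name = DNS.reverseDns(
+            InetAddress.getByName("10.0.0.1"), "ns.example.com");
+ -->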
+ <!-- start interface org.apache.hadoop.net.DNSToSwitchMapping -->
+ <interface name="DNSToSwitchMapping" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[Resolves a list of DNS-names/IP-addresses and returns a list of
+ switch information (network paths). One-to-one correspondence must be
+ maintained between the elements in the lists.
+ Consider an element in the argument list - x.y.com. The switch information
+ that is returned must be a network path of the form /foo/rack,
+ where / is the root, and 'foo' is the switch where 'rack' is connected.
+ Note the hostname/ip-address is not part of the returned path.
+ The network topology of the cluster would determine the number of
+ components in the network path.
+ @param names
+ @return list of resolved network paths]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An interface that should be implemented to allow pluggable
+ DNS-name/IP-address to RackID resolvers.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.DNSToSwitchMapping -->
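+ <!-- Illustrative sketch (not part of the generated API description).
+      A trivial resolver that maps every host to the same rack; the class
+      name and rack path are hypothetical:
+
+        public class SingleRackMapping implements DNSToSwitchMapping {
+          public List<String> resolve(List<String> names) {
+            List<String> paths = new ArrayList<String>(names.size());
+            for (int i = 0; i < names.size(); i++) {
+              paths.add("/default-rack");   // one path per input name, in order
+            }
+            return paths;
+          }
+        }
+ -->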
+ <!-- start class org.apache.hadoop.net.NetUtils -->
+ <class name="NetUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="clazz" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the socket factory for the given class according to its
+ configuration parameter
+ <tt>hadoop.rpc.socket.factory.class.&lt;ClassName&gt;</tt>. When no
+ such parameter exists then fall back on the default socket factory as
+ configured by <tt>hadoop.rpc.socket.factory.class.default</tt>. If
+ this default socket factory is not configured, then fall back on the JVM
+ default socket factory.
+
+ @param conf the configuration
+ @param clazz the class (usually a {@link VersionedProtocol})
+ @return a socket factory]]>
+ </doc>
+ </method>
+ <method name="getDefaultSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default socket factory as specified by the configuration
+ parameter <tt>hadoop.rpc.socket.factory.class.default</tt>
+
+ @param conf the configuration
+ @return the default socket factory as specified in the configuration or
+ the JVM default socket factory if the configuration does not
+ contain a default socket factory property.]]>
+ </doc>
+ </method>
+ <method name="getSocketFactoryFromProperty" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="propValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the socket factory corresponding to the given proxy URI. If the
+ given proxy URI corresponds to an absence of configuration parameter,
+ returns null. If the URI is malformed, an exception is raised.
+
+ @param propValue the property which is the class name of the
+ SocketFactory to instantiate; assumed non null and non empty.
+ @return a socket factory as defined in the property value.]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Util method to build a socket addr from either:
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="getServerAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="oldBindAddressName" type="java.lang.String"/>
+ <param name="oldPortName" type="java.lang.String"/>
+ <param name="newBindAddressName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Handle the transition from pairs of attributes specifying a host and port
+ to a single colon separated one.
+ @param conf the configuration to check
+ @param oldBindAddressName the old address attribute name
+ @param oldPortName the old port attribute name
+ @param newBindAddressName the new combined name
+ @return the complete address from the configuration]]>
+ </doc>
+ </method>
+ <method name="addStaticResolution"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="resolvedName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a static resolution for host. This can be used for setting up
+ fake hostnames that point to a well-known host. For example,
+ in some test cases we need to have daemons with different hostnames
+ running on the same machine. In order to create connections to these
+ daemons, one can set up mappings from those hostnames to "localhost".
+ {@link NetUtils#getStaticResolution(String)} can be used to query for
+ the actual hostname.
+ @param host
+ @param resolvedName]]>
+ </doc>
+ </method>
+ <method name="getStaticResolution" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Retrieves the resolved name for the passed host. The resolved name must
+ have been set earlier using
+ {@link NetUtils#addStaticResolution(String, String)}
+ @param host
+ @return the resolution]]>
+ </doc>
+ </method>
+ <method name="getAllStaticResolutions" return="java.util.List&lt;java.lang.String[]&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is used to get all the resolutions that were added using
+ {@link NetUtils#addStaticResolution(String, String)}. The return
+ value is a List, each element of which is a String array
+ of the form String[0]=hostname, String[1]=resolved-hostname.
+ @return the list of resolutions]]>
+ </doc>
+ </method>
+ <method name="getConnectAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="server" type="org.apache.hadoop.ipc.Server"/>
+ <doc>
+ <![CDATA[Returns InetSocketAddress that a client can use to
+ connect to the server. Server.getListenerAddress() is not correct when
+ the server binds to "0.0.0.0". This returns "127.0.0.1:port" when
+ getListenerAddress() returns "0.0.0.0:port".
+
+ @param server
+ @return socket address that a client can use to connect to the server.]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getInputStream(socket, socket.getSoTimeout()).<br><br>
+
+ From documentation for {@link #getInputStream(Socket, long)}:<br>
+ Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this method instead of {@link Socket#getInputStream()}.
+
+ @see #getInputStream(Socket, long)
+
+ @param socket
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this method instead of {@link Socket#getInputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. Zero
+ means wait as long as necessary.
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getOutputStream(socket, 0). Timeout of zero implies write will
+ wait until data is available.<br><br>
+
+ From documentation for {@link #getOutputStream(Socket, long)} : <br>
+ Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ data is available.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this method instead of {@link Socket#getOutputStream()}.
+
+ @see #getOutputStream(Socket, long)
+
+ @param socket
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ data is available.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this method instead of {@link Socket#getOutputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. Zero
+ means wait as long as necessary.
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetUtils -->
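+ <!-- Illustrative usage sketch (not part of the generated API description).
+      Building an address and wrapping socket streams; "conf" is an existing
+      Configuration and the host name is made up:
+
+        InetSocketAddress addr =
+            NetUtils.createSocketAddr("namenode.example.com:8020");
+        Socket socket = NetUtils.getDefaultSocketFactory(conf).createSocket();
+        socket.connect(addr);
+        InputStream in = NetUtils.getInputStream(socket);
+        OutputStream out = NetUtils.getOutputStream(socket, 1000);
+ -->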
+ <!-- start class org.apache.hadoop.net.NetworkTopology -->
+ <class name="NetworkTopology" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetworkTopology"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Add a leaf node.
+ Update the node counter & rack counter if necessary.
+ @param node
+ node to be added
+ @exception IllegalArgumentException if the node is added under a leaf,
+ or the node to be added is not a leaf]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Remove a node.
+ Update the node counter & rack counter if necessary.
+ @param node
+ node to be removed]]>
+ </doc>
+ </method>
+ <method name="contains" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if the tree contains node <i>node</i>
+
+ @param node
+ a node
+ @return true if <i>node</i> is already in the tree; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="loc" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a string representation of a node, return its reference
+
+ @param loc
+ a path-like string representation of a node
+ @return a reference to the node; null if the node is not in the tree]]>
+ </doc>
+ </method>
+ <method name="getNumOfRacks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of racks]]>
+ </doc>
+ </method>
+ <method name="getNumOfLeaves" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of nodes]]>
+ </doc>
+ </method>
+ <method name="getDistance" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return the distance between two nodes.
+ It is assumed that the distance from one node to its parent is 1.
+ The distance between two nodes is calculated by summing up their distances
+ to their closest common ancestor.
+ @param node1 one node
+ @param node2 another node
+ @return the distance between node1 and node2; the result is undefined
+ if node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="isOnSameRack" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if two nodes are on the same rack
+ @param node1 one node
+ @param node2 another node
+ @return true if node1 and node2 are on the same rack; false otherwise
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="chooseRandom" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Randomly choose one node from <i>scope</i>.
+ If scope starts with ~, choose one from all nodes except for the
+ ones in <i>scope</i>; otherwise, choose one from <i>scope</i>.
+ @param scope range of nodes from which a node will be chosen
+ @return the chosen node]]>
+ </doc>
+ </method>
+ <method name="countNumOfAvailableNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <param name="excludedNodes" type="java.util.List&lt;org.apache.hadoop.net.Node&gt;"/>
+ <doc>
+ <![CDATA[Return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i>.
+ If scope starts with ~, return the number of nodes that are not
+ in <i>scope</i> and not in <i>excludedNodes</i>.
+ @param scope a path string that may start with ~
+ @param excludedNodes a list of nodes
+ @return number of available nodes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert a network tree to a string]]>
+ </doc>
+ </method>
+ <method name="pseudoSortByDistance"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reader" type="org.apache.hadoop.net.Node"/>
+ <param name="nodes" type="org.apache.hadoop.net.Node[]"/>
+ <doc>
+ <![CDATA[Sort the nodes array by their distances to <i>reader</i>.
+ It linearly scans the array; if a local node is found, it is swapped with
+ the first element of the array.
+ If a local-rack node is found, it is swapped with the first element
+ following the local node.
+ If neither a local node nor a local-rack node is found, a random replica
+ location is put at position 0.
+ The remaining nodes are left untouched.]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_RACK" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UNRESOLVED" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_HOST_LEVEL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The class represents a cluster of computers with a tree hierarchical
+ network topology.
+ For example, a cluster may consist of many data centers filled
+ with racks of computers.
+ In a network topology, leaves represent data nodes (computers) and inner
+ nodes represent switches/routers that manage traffic in/out of data centers
+ or racks.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetworkTopology -->
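+ <!-- Illustrative usage sketch (not part of the generated API description).
+      Building a small topology with NodeBase leaves (NodeBase is described
+      below); host and rack names are made up:
+
+        NetworkTopology topology = new NetworkTopology();
+        Node n1 = new NodeBase("host1:50010", "/dc1/rack1");
+        Node n2 = new NodeBase("host2:50010", "/dc1/rack1");
+        topology.add(n1);
+        topology.add(n2);
+        boolean sameRack = topology.isOnSameRack(n1, n2);  // true
+        int distance = topology.getDistance(n1, n2);       // 2 in this layout
+ -->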
+ <!-- start interface org.apache.hadoop.net.Node -->
+ <interface name="Node" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the string representation of this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the node's network location]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface defines a node in a network topology.
+ A node may be a leaf representing a data node or an inner
+ node representing a datacenter or rack.
+ Each node has a name, and its location in the network is
+ decided by a string with a syntax similar to a file name.
+ For example, a data node's name is hostname:port# and if it's located at
+ rack "orange" in datacenter "dog", the string representation of its
+ network location is /dog/orange]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.Node -->
+ <!-- start class org.apache.hadoop.net.NodeBase -->
+ <class name="NodeBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <constructor name="NodeBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its path
+ @param path
+ a concatenation of this node's location, the path separator, and its name]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String, org.apache.hadoop.net.Node, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location
+ @param parent this node's parent node
+ @param level this node's level in the tree]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set this node's network location]]>
+ </doc>
+ </method>
+ <method name="getPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return this node's path]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's string representation]]>
+ </doc>
+ </method>
+ <method name="normalize" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Normalize a path]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree]]>
+ </doc>
+ </method>
+ <field name="PATH_SEPARATOR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PATH_SEPARATOR_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ROOT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="location" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="level" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="parent" type="org.apache.hadoop.net.Node"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class that implements interface Node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NodeBase -->
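+ <!-- Illustrative usage sketch (not part of the generated API description).
+      Constructing a node from a path and reading it back; names are made up:
+
+        NodeBase node = new NodeBase("/dc1/rack1/host1:50010");
+        String loc = node.getNetworkLocation();   // "/dc1/rack1"
+        String path = NodeBase.getPath(node);     // "/dc1/rack1/host1:50010"
+ -->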
+ <!-- start class org.apache.hadoop.net.ScriptBasedMapping -->
+ <class name="ScriptBasedMapping" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.net.DNSToSwitchMapping"/>
+ <constructor name="ScriptBasedMapping"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[This class implements the {@link DNSToSwitchMapping} interface using a
+ script configured via topology.script.file.name .]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.ScriptBasedMapping -->
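As the class comment says, resolution is delegated to an external script named by `topology.script.file.name`. A sketch of the Configurable + DNSToSwitchMapping call sequence; the script path and host name are illustrative.

```java
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.ScriptBasedMapping;

public class RackResolveSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The script receives host names as arguments and prints one
        // rack path (e.g. /dc1/rack1) per input name.
        conf.set("topology.script.file.name", "/etc/hadoop/topology.sh");

        ScriptBasedMapping mapping = new ScriptBasedMapping();
        mapping.setConf(conf);  // Configurable: hand over the configuration

        List<String> racks = mapping.resolve(Arrays.asList("host1.example.com"));
        System.out.println(racks);  // e.g. [/dc1/rack1]
    }
}
```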
+ <!-- start class org.apache.hadoop.net.SocketInputStream -->
+ <class name="SocketInputStream" extends="java.io.InputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.ReadableByteChannel"/>
+ <constructor name="SocketInputStream" type="java.nio.channels.ReadableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for reading, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds; must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), timeout): <br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds; must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), socket.getSoTimeout()):<br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.ReadableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this input stream.
+ This is useful in certain cases, such as passing the channel to
+ {@link FileChannel#transferFrom(ReadableByteChannel, long, long)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This implements an input stream that can have a timeout while reading.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} for the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketOutputStream} for writing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketInputStream -->
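A sketch of the timeout behavior described above. The constructors require a socket with an associated channel, so the socket is opened through SocketChannel rather than new Socket(); host, port, and timeout are illustrative.

```java
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.SocketChannel;

import org.apache.hadoop.net.SocketInputStream;

public class TimedReadSketch {
    public static void main(String[] args) throws IOException {
        // Channel-backed socket, as required by the constructors above.
        SocketChannel channel =
            SocketChannel.open(new InetSocketAddress("example.com", 9000));

        // 5000 ms read timeout; 0 would mean "wait forever".
        SocketInputStream in = new SocketInputStream(channel.socket(), 5000);
        try {
            // Fails with an IOException if no byte arrives within the timeout,
            // instead of blocking indefinitely.
            int first = in.read();
            System.out.println("first byte: " + first);
        } finally {
            in.close();
        }
    }
}
```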
+ <!-- start class org.apache.hadoop.net.SocketOutputStream -->
+ <class name="SocketOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.WritableByteChannel"/>
+ <constructor name="SocketOutputStream" type="java.nio.channels.WritableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for writing, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds; must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketOutputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketOutputStream(socket.getChannel(), timeout):<br><br>
+
+ Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketOutputStream#SocketOutputStream(WritableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds; must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.WritableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this output stream.
+ This is useful in certain cases, such as passing the channel to
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This implements an output stream that can have a timeout while writing.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} on the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketInputStream} for reading.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketOutputStream -->
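The getChannel() note above points at the main use case: zero-copy transfers via FileChannel. A sketch, assuming a channel-backed socket as in the SocketInputStream example; the address and file path are illustrative.

```java
import java.io.FileInputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.FileChannel;
import java.nio.channels.SocketChannel;

import org.apache.hadoop.net.SocketOutputStream;

public class TransferToSketch {
    public static void main(String[] args) throws IOException {
        SocketChannel channel =
            SocketChannel.open(new InetSocketAddress("example.com", 9000));
        // 10000 ms write timeout.
        SocketOutputStream out = new SocketOutputStream(channel.socket(), 10000);

        FileChannel file = new FileInputStream("/tmp/block.dat").getChannel();
        try {
            // Hand the socket's channel to transferTo for a zero-copy write.
            file.transferTo(0, file.size(), out.getChannel());
        } finally {
            file.close();
            out.close();
        }
    }
}
```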
+ <!-- start class org.apache.hadoop.net.SocksSocketFactory -->
+ <class name="SocksSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="SocksSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <constructor name="SocksSocketFactory" type="java.net.Proxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with a supplied Proxy
+
+ @param proxy the proxy to use to create sockets]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create sockets with a SOCKS proxy]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocksSocketFactory -->
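A sketch of the proxy-backed factory, using the java.net.Proxy constructor documented above; the proxy address and target host are illustrative.

```java
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Proxy;
import java.net.Socket;

import org.apache.hadoop.net.SocksSocketFactory;

public class SocksSketch {
    public static void main(String[] args) throws IOException {
        Proxy proxy = new Proxy(Proxy.Type.SOCKS,
                                new InetSocketAddress("proxy.example.com", 1080));
        SocksSocketFactory factory = new SocksSocketFactory(proxy);

        // Every socket the factory hands out is routed through the proxy.
        Socket s = factory.createSocket("example.com", 9000);
        s.close();
    }
}
```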
+ <!-- start class org.apache.hadoop.net.StandardSocketFactory -->
+ <class name="StandardSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StandardSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create standard sockets that connect directly, without a proxy]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.StandardSocketFactory -->
+ <doc>
+ <![CDATA[Network-related classes.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.record">
+ <!-- start class org.apache.hadoop.record.BinaryRecordInput -->
+ <class name="BinaryRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="BinaryRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordInput" type="java.io.DataInput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inp" type="java.io.DataInput"/>
+ <doc>
+ <![CDATA[Get a thread-local record input for the supplied DataInput.
+ @param inp data input stream
+ @return binary record input corresponding to the supplied DataInput.]]>
+ </doc>
+ </method>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordInput -->
+ <!-- start class org.apache.hadoop.record.BinaryRecordOutput -->
+ <class name="BinaryRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="BinaryRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordOutput" type="java.io.DataOutput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <doc>
+ <![CDATA[Get a thread-local record output for the supplied DataOutput.
+ @param out data output stream
+ @return binary record output corresponding to the supplied DataOutput.]]>
+ </doc>
+ </method>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordOutput -->
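BinaryRecordInput and BinaryRecordOutput are symmetric, so a round trip through a byte array exercises both halves, including the thread-local get() accessors documented above.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.record.BinaryRecordInput;
import org.apache.hadoop.record.BinaryRecordOutput;

public class BinaryRoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();

        // get() returns a reusable, thread-local instance.
        BinaryRecordOutput out =
            BinaryRecordOutput.get(new DataOutputStream(bytes));
        out.writeInt(42, "answer");       // tags matter only to tagged
        out.writeString("hello", "note"); // formats such as XML

        BinaryRecordInput in = BinaryRecordInput.get(
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(in.readInt("answer"));  // 42
        System.out.println(in.readString("note")); // hello
    }
}
```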
+ <!-- start class org.apache.hadoop.record.Buffer -->
+ <class name="Buffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Buffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-count sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte array as the initial value.
+
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[], int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte range as the initial value.
+
+ @param bytes A copy of this array becomes the backing storage for the object.
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Use the specified bytes array as underlying sequence.
+
+ @param bytes byte sequence]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Copy the specified byte array to the Buffer. Replaces the current buffer.
+
+ @param bytes byte array to be assigned
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the Buffer.
+
+ @return The backing byte array; its data is only valid between indexes 0 and getCount() - 1.
+ </doc>
+ </method>
+ <method name="getCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current count of the buffer.]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum count that could be handled without
+ resizing the backing storage.
+
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newCapacity" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved if newCapacity >= getCount().
+ @param newCapacity The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the buffer to 0 size]]>
+ </doc>
+ </method>
+ <method name="truncate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Change the capacity of the backing store to be the same as the current
+ count of the buffer.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer.
+
+ @param bytes byte array to be appended
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer
+
+ @param bytes byte array to be appended]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Define the sort order of the Buffer.
+
+ @param other The other buffer
+ @return Positive if this is bigger than other, 0 if they are equal, and
+ negative if this is smaller than other.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="charsetName" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ <doc>
+ <![CDATA[Convert the byte buffer to a string in a specific character encoding.
+
+ @param charsetName Valid Java Character Set Name]]>
+ </doc>
+ </method>
+ <method name="clone" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="CloneNotSupportedException" type="java.lang.CloneNotSupportedException"/>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is used as a Java native type for buffer.
+ It is resizable and distinguishes between the count of the sequence and
+ the current capacity.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Buffer -->
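A short sketch of the count/capacity distinction the class comment draws, using only the constructors and methods listed above.

```java
import org.apache.hadoop.record.Buffer;

public class BufferSketch {
    public static void main(String[] args) {
        Buffer buf = new Buffer(new byte[] {1, 2, 3});
        buf.append(new byte[] {4, 5});

        System.out.println(buf.getCount());    // 5: bytes actually stored
        System.out.println(buf.getCapacity()); // >= 5: size of backing array

        buf.truncate();  // shrink capacity down to the current count
        buf.reset();     // count back to 0; the Buffer can be reused
    }
}
```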
+ <!-- start class org.apache.hadoop.record.CsvRecordInput -->
+ <class name="CsvRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="CsvRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordInput -->
+ <!-- start class org.apache.hadoop.record.CsvRecordOutput -->
+ <class name="CsvRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="CsvRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordOutput -->
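The CSV pair mirrors the binary pair, but its output is human-readable text. A sketch writing a few tagged fields to stdout; the field values are illustrative, and the exact quoting rules belong to the CSV serializer.

```java
import java.io.IOException;

import org.apache.hadoop.record.CsvRecordOutput;

public class CsvSketch {
    public static void main(String[] args) throws IOException {
        CsvRecordOutput out = new CsvRecordOutput(System.out);
        out.writeInt(7, "id");
        out.writeString("alice", "name");
        out.writeBool(true, "active");
        // The fields come out comma-separated on the stream.
    }
}
```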
+ <!-- start interface org.apache.hadoop.record.Index -->
+ <interface name="Index" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="done" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="incr"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Interface that acts as an iterator for deserializing vectors and maps.
+ The deserializer returns an instance that the record uses to
+ read vectors and maps. An example of usage is as follows:
+
+ <code>
+ Index idx = startVector(...);
+ while (!idx.done()) {
+ .... // read element of a vector
+ idx.incr();
+ }
+ </code>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.Index -->
+ <!-- start class org.apache.hadoop.record.Record -->
+ <class name="Record" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Record"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="serialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record with a tag (usually field name)
+ @param rout Record output destination
+ @param tag record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record with a tag (usually field name)
+ @param rin Record input source
+ @param tag Record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record without a tag
+ @param rout Record output destination]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record without a tag
+ @param rin Record input source]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="din" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Abstract class that is extended by generated classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Record -->
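Record is the base of generated classes, per the class comment. A hand-written stand-in can show the contract the three abstract methods impose; the Point class below is hypothetical and simplified, not something Hadoop ships.

```java
import java.io.IOException;

import org.apache.hadoop.record.Record;
import org.apache.hadoop.record.RecordInput;
import org.apache.hadoop.record.RecordOutput;

// Hypothetical record: roughly what a generated class would look like.
class Point extends Record {
    private int x, y;

    public void serialize(RecordOutput rout, String tag) throws IOException {
        rout.startRecord(this, tag);
        rout.writeInt(x, "x");
        rout.writeInt(y, "y");
        rout.endRecord(this, tag);
    }

    public void deserialize(RecordInput rin, String tag) throws IOException {
        rin.startRecord(tag);
        x = rin.readInt("x");
        y = rin.readInt("y");
        rin.endRecord(tag);
    }

    public int compareTo(Object peer) throws ClassCastException {
        Point p = (Point) peer;  // ClassCastException for foreign types
        if (x != p.x) return x < p.x ? -1 : 1;
        return y < p.y ? -1 : (y > p.y ? 1 : 0);
    }
}
```

The inherited write(DataOutput) and readFields(DataInput) then route through these serialize/deserialize methods, which is what makes a record usable as a WritableComparable key.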
+ <!-- start class org.apache.hadoop.record.RecordComparator -->
+ <class name="RecordComparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordComparator" type="java.lang.Class"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a raw {@link Record} comparison implementation.]]>
+ </doc>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.record.RecordComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link Record} implementation.
+
+ @param c record class for which a raw comparator is provided
+ @param comparator Raw comparator instance for class c]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A raw record comparator base class]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.RecordComparator -->
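define() registers a byte-level comparator so sorting can skip deserialization. A sketch for the hypothetical Point record above; the plain lexicographic compare is an illustration, not the encoding-aware logic a generated comparator would actually use.

```java
import org.apache.hadoop.record.RecordComparator;

class PointComparator extends RecordComparator {
    protected PointComparator() {
        super(Point.class);  // Point is the hypothetical record above
    }

    // Compare the serialized bytes directly, without deserializing.
    public int compare(byte[] b1, int s1, int l1,
                       byte[] b2, int s2, int l2) {
        int n = Math.min(l1, l2);
        for (int i = 0; i < n; i++) {
            int d = (b1[s1 + i] & 0xff) - (b2[s2 + i] & 0xff);
            if (d != 0) return d;
        }
        return l1 - l2;
    }

    static {
        // Make the registry hand this comparator out for Point.
        RecordComparator.define(Point.class, new PointComparator());
    }
}
```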
+ <!-- start interface org.apache.hadoop.record.RecordInput -->
+ <interface name="RecordInput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a byte from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a boolean from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a long integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a single-precision float from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a double-precision number from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read byte array from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of elements.]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of map entries.]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the deserializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordInput -->
+ <!-- start interface org.apache.hadoop.record.RecordOutput -->
+ <interface name="RecordOutput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a byte to serialized record.
+ @param b Byte to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a boolean to serialized record.
+ @param b Boolean to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write an integer to serialized record.
+ @param i Integer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a long integer to serialized record.
+ @param l Long to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a single-precision float to serialized record.
+ @param f Float to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a double-precision floating point number to serialized record.
+ @param d Double to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a unicode string to serialized record.
+ @param s String to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a buffer to serialized record.
+ @param buf Buffer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a record to be serialized.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized record.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a vector to be serialized.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized vector.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a map to be serialized.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized map.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the serializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordOutput -->
+ <!-- start class org.apache.hadoop.record.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array with the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+ <![CDATA[Get the encoded length of an integer stored in the variable-length format.
+ @return the encoded length]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte indicates whether the
+ long is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following long
+ is positive and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following long
+ is negative and the number of bytes that follow is -(v+120). Bytes are
+ stored in the high-non-zero-byte-first order.
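+
+ For example (worked from the rules above): 300 requires two magnitude
+ bytes (0x01 0x2C), so it is encoded as the first byte -114
+ (giving -(v+112) = 2 bytes to follow), followed by 0x01 and 0x2C.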
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an int to a binary stream with zero-compressed encoding.
+
+ @param stream Binary output stream
+ @param i int to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <field name="hexchars" type="char[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Various utility functions for Hadoop record I/O runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Utils -->
+ <!-- start class org.apache.hadoop.record.XmlRecordInput -->
+ <class name="XmlRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="XmlRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Deserializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordInput -->
+ <!-- start class org.apache.hadoop.record.XmlRecordOutput -->
+ <class name="XmlRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="XmlRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Serializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordOutput -->
+ <doc>
+ <![CDATA[Hadoop record I/O contains classes and a record description language
+ translator for simplifying serialization and deserialization of records in a
+ language-neutral manner.
+
+ <h2>Introduction</h2>
+
+ Software systems of any significant complexity require mechanisms for data
+interchange with the outside world. These interchanges typically involve the
+marshaling and unmarshaling of logical units of data to and from data streams
+(files, network connections, memory buffers etc.). Applications usually embed
+code for serializing and deserializing the data types that they manipulate.
+The work of serialization has several features that make
+automatic code generation for it worthwhile. Given a particular output encoding
+(binary, XML, etc.), serialization of primitive types and simple compositions
+of primitives (structs, vectors etc.) is a very mechanical task. Manually
+written serialization code can be susceptible to bugs, especially when records
+have a large number of fields or a record definition changes between software
+versions. Lastly, it can be very useful for applications written in different
+programming languages to be able to share and interchange data. This can be
+made a lot easier by describing the data records manipulated by these
+applications in a language agnostic manner and using the descriptions to derive
+implementations of serialization in multiple target languages.
+
+This document describes Hadoop Record I/O, a mechanism that is aimed
+at
+<ul>
+<li> enabling the specification of simple serializable data types (records)
+<li> enabling the generation of code in multiple target languages for
+marshaling and unmarshaling such types
+<li> providing target language specific support that will enable application
+programmers to incorporate generated code into their applications
+</ul>
+
+The goals of Hadoop Record I/O are similar to those of mechanisms such as XDR,
+ASN.1, PADS and ICE. While these systems all include a DDL that enables
+the specification of most record types, they differ widely in what else they
+focus on. The focus in Hadoop Record I/O is on data marshaling and
+multi-lingual support. We take a translator-based approach to serialization.
+Hadoop users have to describe their data in a simple data description
+language. The Hadoop DDL translator rcc generates code that users
+can invoke in order to read/write their data from/to simple stream
+abstractions. Next we list explicitly some of the goals and non-goals of
+Hadoop Record I/O.
+
+
+<h3>Goals</h3>
+
+<ul>
+<li> Support for commonly used primitive types. Hadoop should include as
+primitives commonly used built-in types from the programming languages we intend to
+support.
+
+<li> Support for common data compositions (including recursive compositions).
+Hadoop should support widely used composite types such as structs and
+vectors.
+
+<li> Code generation in multiple target languages. Hadoop should be capable of
+generating serialization code in multiple target languages and should be
+easily extensible to new target languages. The initial target languages are
+C++ and Java.
+
+<li> Support for generated target languages. Hadoop should include support
+in the form of headers, libraries, packages for supported target languages
+that enable easy inclusion and use of generated code in applications.
+
+<li> Support for multiple output encodings. Candidates include
+packed binary, comma-separated text, XML etc.
+
+<li> Support for specifying record types in a backwards/forwards compatible
+manner. This will probably be in the form of support for optional fields in
+records. This version of the document does not include a description of the
+planned mechanism; we intend to include it in the next iteration.
+
+</ul>
+
+<h3>Non-Goals</h3>
+
+<ul>
+ <li> Serializing existing arbitrary C++ classes.
+ <li> Serializing complex data structures such as trees, linked lists etc.
+ <li> Built-in indexing schemes, compression, or check-sums.
+ <li> Dynamic construction of objects from an XML schema.
+</ul>
+
+The remainder of this document describes the features of Hadoop record I/O
+in more detail. Section 2 describes the data types supported by the system.
+Section 3 lays out the DDL syntax with some examples of simple records.
+Section 4 describes the process of code generation with rcc. Section 5
+describes target language mappings and support for Hadoop types. We include a
+fairly complete description of C++ mappings, with the intent to include Java and
+others in upcoming iterations of this document. The last section talks about
+supported output encodings.
+
+
+<h2>Data Types and Streams</h2>
+
+This section describes the primitive and composite types supported by Hadoop.
+We aim to support a set of types that can be used to simply and efficiently
+express a wide range of record types in different programming languages.
+
+<h3>Primitive Types</h3>
+
+For the most part, the primitive types of Hadoop map directly to primitive
+types in high level programming languages. Special cases are the
+ustring (a Unicode string) and buffer types, which we believe
+find wide use and which are usually implemented in library code and not
+available as language built-ins. Hadoop also supplies these via library code
+when a target language built-in is not present and there is no widely
+adopted "standard" implementation. The complete list of primitive types is:
+
+<ul>
+ <li> byte: An 8-bit unsigned integer.
+ <li> boolean: A boolean value.
+ <li> int: A 32-bit signed integer.
+ <li> long: A 64-bit signed integer.
+ <li> float: A single precision floating point number as described by
+ IEEE-754.
+ <li> double: A double precision floating point number as described by
+ IEEE-754.
+ <li> ustring: A string consisting of Unicode characters.
+ <li> buffer: An arbitrary sequence of bytes.
+</ul>
+
+
+<h3>Composite Types</h3>
+Hadoop supports a small set of composite types that enable the description
+of simple aggregate types and containers. A composite type is serialized
+by sequentially serializing its constituent elements. The supported
+composite types are:
+
+<ul>
+
+ <li> record: An aggregate type like a C-struct. This is a list of
+typed fields that are together considered a single unit of data. A record
+is serialized by sequentially serializing its constituent fields. In addition
+to serialization, a record has comparison operations (equality and less-than)
+implemented for it; these are defined as memberwise comparisons.
+
+ <li>vector: A sequence of entries of the same data type, primitive
+or composite.
+
+ <li> map: An associative container mapping instances of a key type to
+instances of a value type. The key and value types may themselves be primitive
+or composite types.
+
+</ul>
+
+<h3>Streams</h3>
+
+Hadoop generates code for serializing and deserializing record types to
+abstract streams. For each target language Hadoop defines very simple input
+and output stream interfaces. Application writers can usually develop
+concrete implementations of these by putting a one-method wrapper around
+an existing stream implementation.
+
+
+<h2>DDL Syntax and Examples</h2>
+
+We now describe the syntax of the Hadoop data description language. This is
+followed by a few examples of DDL usage.
+
+<h3>Hadoop DDL Syntax</h3>
+
+<pre><code>
+recfile = *include module *record
+include = "include" path
+path = (relative-path / absolute-path)
+module = "module" module-name
+module-name = name *("." name)
+record = "class" name "{" 1*(field) "}"
+field = type name ";"
+name = ALPHA *(ALPHA / DIGIT / "_")
+type = (ptype / ctype)
+ptype = ("byte" / "boolean" / "int" /
+         "long" / "float" / "double" /
+         "ustring" / "buffer")
+ctype = ("vector" "<" type ">") /
+        ("map" "<" type "," type ">") /
+        name
+</code></pre>
+
+A DDL file describes one or more record types. It begins with zero or
+more include declarations, followed by a single mandatory module
+declaration and zero or more class declarations. The semantics of each of
+these declarations are described below:
+
+<ul>
+
+<li>include: An include declaration specifies a DDL file to be
+referenced when generating code for types in the current DDL file. Record types
+in the current compilation unit may refer to types in all included files.
+File inclusion is recursive. An include does not trigger code
+generation for the referenced file.
+
+<li> module: Every Hadoop DDL file must have a single module
+declaration that follows the list of includes and precedes all record
+declarations. A module declaration identifies a scope within which
+the names of all types in the current file are visible. Module names are
+mapped to C++ namespaces, Java packages etc. in generated code.
+
+<li> class: Record types are specified through class
+declarations. A class declaration is like a Java class declaration.
+It specifies a named record type and a list of fields that constitute records
+of the type. Usage is illustrated in the following examples.
+
+</ul>
+
+<h3>Examples</h3>
+
+<ul>
+<li>A simple DDL file links.jr with just one record declaration.
+<pre><code>
+module links {
+ class Link {
+ ustring URL;
+ boolean isRelative;
+ ustring anchorText;
+ };
+}
+</code></pre>
+
+<li> A DDL file outlinks.jr which includes another.
+<pre><code>
+include "links.jr"
+
+module outlinks {
+ class OutLinks {
+ ustring baseURL;
+ vector<links.Link> outLinks;
+ };
+}
+</code></pre>
+</ul>
+
+<h2>Code Generation</h2>
+
+The Hadoop translator is written in Java. Invocation is done by executing a
+wrapper shell script named rcc. It takes a list of
+record description files as a mandatory argument and an optional
+language argument, --language or -l (the default
+is Java). Thus a typical invocation would look like:
+<pre><code>
+$ rcc -l C++ <filename> ...
+</code></pre>
+
+
+<h2>Target Language Mappings and Support</h2>
+
+For all target languages, the unit of code generation is a record type.
+For each record type, Hadoop generates code for serialization and
+deserialization, record comparison and access to record members.
+
+<h3>C++</h3>
+
+Support for including Hadoop generated C++ code in applications comes in the
+form of a header file recordio.hh, which needs to be included in source
+that uses Hadoop types, and a library librecordio.a, which applications need
+to be linked with. The header declares the Hadoop C++ namespace which defines
+appropriate types for the various primitives, the basic interfaces for
+records and streams and enumerates the supported serialization encodings.
+Declarations of these interfaces and a description of their semantics follow:
+
+<pre><code>
+namespace hadoop {
+
+ enum RecFormat { kBinary, kXML, kCSV };
+
+ class InStream {
+ public:
+ virtual ssize_t read(void *buf, size_t n) = 0;
+ };
+
+ class OutStream {
+ public:
+ virtual ssize_t write(const void *buf, size_t n) = 0;
+ };
+
+ class IOError : public runtime_error {
+ public:
+ explicit IOError(const std::string& msg);
+ };
+
+ class IArchive;
+ class OArchive;
+
+ class RecordReader {
+ public:
+ RecordReader(InStream& in, RecFormat fmt);
+ virtual ~RecordReader(void);
+
+ virtual void read(Record& rec);
+ };
+
+ class RecordWriter {
+ public:
+ RecordWriter(OutStream& out, RecFormat fmt);
+ virtual ~RecordWriter(void);
+
+ virtual void write(Record& rec);
+ };
+
+
+ class Record {
+ public:
+ virtual std::string type(void) const = 0;
+ virtual std::string signature(void) const = 0;
+ protected:
+ virtual bool validate(void) const = 0;
+
+ virtual void
+ serialize(OArchive& oa, const std::string& tag) const = 0;
+
+ virtual void
+ deserialize(IArchive& ia, const std::string& tag) = 0;
+ };
+}
+</code></pre>
+
+<ul>
+
+<li> RecFormat: An enumeration of the serialization encodings supported
+by this implementation of Hadoop.
+
+<li> InStream: A simple abstraction for an input stream. This has a
+single public read method that reads n bytes from the stream into
+the buffer buf. It has the same semantics as a blocking read system
+call and returns the number of bytes read, or -1 if an error occurs.
+
+<li> OutStream: A simple abstraction for an output stream. This has a
+single write method that writes n bytes to the stream from the
+buffer buf. It has the same semantics as a blocking write system
+call and returns the number of bytes written, or -1 if an error occurs.
+
+<li> RecordReader: A RecordReader reads records one at a time from
+an underlying stream in a specified record format. The reader is instantiated
+with a stream and a serialization format. It has a read method that
+takes an instance of a record and deserializes the record from the stream.
+
+<li> RecordWriter: A RecordWriter writes records one at a
+time to an underlying stream in a specified record format. The writer is
+instantiated with a stream and a serialization format. It has a
+write method that takes an instance of a record and serializes the
+record to the stream.
+
+<li> Record: The base class for all generated record types. This has two
+public methods type and signature that return the typename and the
+type signature of the record.
+
+</ul>
+
+Two files are generated for each record file (note: not for each record). If a
+record file is named "name.jr", the generated files are
+"name.jr.cc" and "name.jr.hh" containing serialization
+implementations and record type declarations respectively.
+
+For each record in the DDL file, the generated header file will contain a
+class definition corresponding to the record type; method definitions for the
+generated type will be present in the '.cc' file. The generated class will
+inherit from the abstract class hadoop::Record. The DDL file's
+module declaration determines the namespace the record belongs to.
+Each '.'-delimited token in the module declaration results in the
+creation of a namespace. For instance, the declaration module docs.links
+results in the creation of a docs namespace and a nested
+docs::links namespace. In the preceding examples, the Link class
+is placed in the links namespace. The header file corresponding to
+the links.jr file will contain:
+
+<pre><code>
+namespace links {
+ class Link : public hadoop::Record {
+ // ....
+ };
+}
+</code></pre>
+
+Each field within the record will cause the generation of a private member
+declaration of the appropriate type in the class declaration, and one or more
+accessor methods. The generated class will implement the serialize and
+deserialize methods defined in hadoop::Record. It will also
+implement the inspection methods type and signature from
+hadoop::Record. A default constructor and virtual destructor will also
+be generated. Serialization code will read/write records into streams that
+implement the hadoop::InStream and the hadoop::OutStream interfaces.
+
+For each member of a record an accessor method is generated that returns
+either the member or a reference to the member. For members that are returned
+by value, a setter method is also generated. This is true for primitive
+data members of the types byte, int, long, boolean, float and
+double. For example, for an int field called MyField the following
+code is generated.
+
+<pre><code>
+...
+private:
+ int32_t mMyField;
+ ...
+public:
+ int32_t getMyField(void) const {
+ return mMyField;
+ };
+
+ void setMyField(int32_t m) {
+ mMyField = m;
+ };
+ ...
+</code></pre>
+
+For a ustring, buffer, or composite field, the generated code
+contains only accessors that return a reference to the field. A const
+and a non-const accessor are generated. For example:
+
+<pre><code>
+...
+private:
+ std::string mMyBuf;
+ ...
+public:
+
+ std::string& getMyBuf() {
+ return mMyBuf;
+ };
+
+ const std::string& getMyBuf() const {
+ return mMyBuf;
+ };
+ ...
+</code></pre>
+
+<h4>Examples</h4>
+
+Suppose the inclrec.jr file contains:
+<pre><code>
+module inclrec {
+ class RI {
+ int I32;
+ double D;
+ ustring S;
+ };
+}
+</code></pre>
+
+and the testrec.jr file contains:
+
+<pre><code>
+include "inclrec.jr"
+module testrec {
+ class R {
+ vector<float> VF;
+ RI Rec;
+ buffer Buf;
+ };
+}
+</code></pre>
+
+Then the invocation of rcc such as:
+<pre><code>
+$ rcc -l c++ inclrec.jr testrec.jr
+</code></pre>
+will result in generation of four files:
+inclrec.jr.{cc,hh} and testrec.jr.{cc,hh}.
+
+The inclrec.jr.hh file will contain:
+
+<pre><code>
+#ifndef _INCLREC_JR_HH_
+#define _INCLREC_JR_HH_
+
+#include "recordio.hh"
+
+namespace inclrec {
+
+ class RI : public hadoop::Record {
+
+ private:
+
+ int32_t I32;
+ double D;
+ std::string S;
+
+ public:
+
+ RI(void);
+ virtual ~RI(void);
+
+ virtual bool operator==(const RI& peer) const;
+ virtual bool operator<(const RI& peer) const;
+
+ virtual int32_t getI32(void) const { return I32; }
+ virtual void setI32(int32_t v) { I32 = v; }
+
+ virtual double getD(void) const { return D; }
+ virtual void setD(double v) { D = v; }
+
+ virtual std::string& getS(void) { return S; }
+ virtual const std::string& getS(void) const { return S; }
+
+ virtual std::string type(void) const;
+ virtual std::string signature(void) const;
+
+ protected:
+
+ virtual void serialize(hadoop::OArchive& a, const std::string& tag) const;
+ virtual void deserialize(hadoop::IArchive& a, const std::string& tag);
+ };
+} // end namespace inclrec
+
+#endif /* _INCLREC_JR_HH_ */
+
+</code></pre>
+
+The testrec.jr.hh file will contain:
+
+
+<pre><code>
+
+#ifndef _TESTREC_JR_HH_
+#define _TESTREC_JR_HH_
+
+#include "inclrec.jr.hh"
+
+namespace testrec {
+ class R : public hadoop::Record {
+
+ private:
+
+ std::vector<float> VF;
+ inclrec::RI Rec;
+ std::string Buf;
+
+ public:
+
+ R(void);
+ virtual ~R(void);
+
+ virtual bool operator==(const R& peer) const;
+ virtual bool operator<(const R& peer) const;
+
+ virtual std::vector<float>& getVF(void);
+ virtual const std::vector<float>& getVF(void) const;
+
+ virtual std::string& getBuf(void);
+ virtual const std::string& getBuf(void) const;
+
+ virtual inclrec::RI& getRec(void);
+ virtual const inclrec::RI& getRec(void) const;
+
+ virtual void serialize(hadoop::OArchive& a, const std::string& tag) const;
+ virtual void deserialize(hadoop::IArchive& a, const std::string& tag);
+
+ virtual std::string type(void) const;
+ virtual std::string signature(void) const;
+ };
+} // end namespace testrec
+#endif /* _TESTREC_JR_HH_ */
+
+</code></pre>
+
+<h3>Java</h3>
+
+Code generation for Java is similar to that for C++. A Java class is generated
+for each record type with private members corresponding to the fields. Getters
+and setters for fields are also generated. Some differences arise in the
+way comparison is expressed and in the mapping of modules to packages and
+classes to files. For equality testing, an equals method is generated
+for each record type. As per Java requirements a hashCode method is also
+generated. For comparison a compareTo method is generated for each
+record type. This has the semantics defined by the Java Comparable
+interface; that is, the method returns a negative integer, zero, or a positive
+integer as the invoked object is less than, equal to, or greater than the
+comparison parameter.
+
+A .java file is generated per record type as opposed to per DDL
+file as in C++. The module declaration translates to a Java
+package declaration. The module name maps to an identical Java package
+name. In addition to this mapping, the DDL compiler creates the appropriate
+directory hierarchy for the package and places the generated .java
+files in the correct directories.
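+
+The following sketch (illustrative only, not verbatim rcc output; member and
+accessor names are assumptions) shows the approximate shape of the Java class
+generated for the Link record from the earlier example, using the type
+mappings summarized below. The real generated class also extends
+org.apache.hadoop.record.Record and implements serialize, deserialize,
+equals, hashCode and compareTo; those are elided here.
+
+<pre><code>
+package links;
+
+public class Link {
+  // One private member per DDL field (ustring maps to java.lang.String).
+  private String URL;
+  private boolean isRelative;
+  private String anchorText;
+
+  // Getters and setters are generated for each field.
+  public String getURL() { return URL; }
+  public void setURL(String url) { this.URL = url; }
+
+  public boolean getIsRelative() { return isRelative; }
+  public void setIsRelative(boolean rel) { this.isRelative = rel; }
+
+  public String getAnchorText() { return anchorText; }
+  public void setAnchorText(String text) { this.anchorText = text; }
+}
+</code></pre>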
+
+<h2>Mapping Summary</h2>
+
+<pre><code>
+DDL Type C++ Type Java Type
+
+boolean bool boolean
+byte int8_t byte
+int int32_t int
+long int64_t long
+float float float
+double double double
+ustring std::string java.lang.String
+buffer std::string org.apache.hadoop.record.Buffer
+class type class type class type
+vector<type> std::vector<type> java.util.ArrayList<type>
+map<type,type> std::map<type,type> java.util.TreeMap<type,type>
+</code></pre>
+
+<h2>Data encodings</h2>
+
+This section describes the format of the data encodings supported by Hadoop.
+Currently, three data encodings are supported, namely binary, CSV and XML.
+
+<h3>Binary Serialization Format</h3>
+
+The binary data encoding format is fairly dense. Serialization of composite
+types is simply defined as a concatenation of serializations of the constituent
+elements (lengths are included in vectors and maps).
+
+Composite types are serialized as follows:
+<ul>
+<li> class: Sequence of serialized members.
+<li> vector: The number of elements serialized as an int. Followed by a
+sequence of serialized elements.
+<li> map: The number of key value pairs serialized as an int. Followed
+by a sequence of serialized (key,value) pairs.
+</ul>
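+
+For example, a vector of three ints is serialized as the int 3 (the element
+count) followed by the three serialized elements, and a map of two entries as
+the int 2 followed by the two serialized (key,value) pairs.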
+
+Serialization of primitives is more interesting, with a zero compression
+optimization for integral types and normalization to UTF-8 for strings.
+Primitive types are serialized as follows:
+
+<ul>
+<li> byte: Represented by 1 byte, as is.
+<li> boolean: Represented by 1 byte (0 or 1).
+<li> int/long: Integers and longs are serialized zero compressed.
+Represented as 1-byte if -120 <= value < 128. Otherwise, serialized as a
+sequence of 2-5 bytes for ints, 2-9 bytes for longs. The first byte represents
+the number of trailing bytes, N, as the negative number (-120-N). For example,
+the number 1024 (0x400) is represented by the byte sequence 'x86 x04 x00'.
+This doesn't help much for 4-byte integers but does a reasonably good job with
+longs without bit twiddling (a code sketch follows this list).
+<li> float/double: Serialized in IEEE 754 single and double precision
+format in network byte order. This is the format used by Java.
+<li> ustring: Serialized as 4-byte zero compressed length followed by
+data encoded as UTF-8. Strings are normalized to UTF-8 regardless of native
+language representation.
+<li> buffer: Serialized as a 4-byte zero compressed length followed by the
+raw bytes in the buffer.
+</ul>
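+
+As a small illustration of the zero-compressed encoding (a sketch, not part
+of the original specification), the org.apache.hadoop.record.Utils helpers
+described earlier in this package can round-trip a value; the class name
+VIntDemo below is illustrative only:
+
+<pre><code>
+// Minimal sketch: round-trip one int through the zero-compressed
+// encoding using org.apache.hadoop.record.Utils.
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import org.apache.hadoop.record.Utils;
+
+public class VIntDemo {
+  public static void main(String[] args) throws IOException {
+    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+    // Encode 1024 into the stream with zero-compressed encoding.
+    Utils.writeVInt(new DataOutputStream(bytes), 1024);
+    byte[] buf = bytes.toByteArray();
+    // Decode it back, starting at offset 0 of the byte array.
+    int decoded = Utils.readVInt(buf, 0);
+    System.out.println(decoded + " encoded in " + buf.length + " bytes");
+  }
+}
+</code></pre>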
+
+
+<h3>CSV Serialization Format</h3>
+
+The CSV serialization format has a lot more structure than the "standard"
+Excel CSV format, but we believe the additional structure is useful because
+
+<ul>
+<li> it makes parsing a lot easier without detracting too much from legibility
+<li> the delimiters around composites make it obvious when one is reading a
+sequence of Hadoop records
+</ul>
+
+Serialization formats for the various types are detailed in the grammar that
+follows. The notable feature of the formats is the use of delimiters for
+indicating certain field types.
+
+<ul>
+<li> A string field begins with a single quote (').
+<li> A buffer field begins with a sharp (#).
+<li> A class, vector or map begins with 's{', 'v{' or 'm{' respectively and
+ends with '}'.
+</ul>
+
+The CSV format can be described by the following grammar:
+
+<pre><code>
+record = primitive / struct / vector / map
+primitive = boolean / int / long / float / double / ustring / buffer
+
+boolean = "T" / "F"
+int = ["-"] 1*DIGIT
+long = ";" ["-"] 1*DIGIT
+float = ["-"] 1*DIGIT "." 1*DIGIT ["E" / "e" ["-"] 1*DIGIT]
+double = ";" ["-"] 1*DIGIT "." 1*DIGIT ["E" / "e" ["-"] 1*DIGIT]
+
+ustring = "'" *(UTF8 char except NULL, LF, % and , / "%00" / "%0a" / "%25" / "%2c" )
+
+buffer = "#" *(BYTE except NULL, LF, % and , / "%00" / "%0a" / "%25" / "%2c" )
+
+struct = "s{" record *("," record) "}"
+vector = "v{" [record *("," record)] "}"
+map = "m{" [*(record "," record)] "}"
+</code></pre>
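+
+As a hand-worked example of this grammar (illustrative; not produced by the
+actual serializer), a Link record from the earlier DDL example with
+URL "http://a.com/", isRelative false and anchorText "home" would
+serialize as:
+
+<pre><code>
+s{'http://a.com/,F,'home}
+</code></pre>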
+
+<h3>XML Serialization Format</h3>
+
+The XML serialization format is the same used by Apache XML-RPC
+(http://ws.apache.org/xmlrpc/types.html). This is an extension of the original
+XML-RPC format and adds some additional data types. Not all record I/O types
+are directly expressible in this format, and access to a DDL is required in
+order to convert these to valid types. All types, primitive or composite, are
+represented by &lt;value&gt; elements. The particular XML-RPC type is
+indicated by a nested element in the &lt;value&gt; element. The encoding for
+records is always UTF-8. Primitive types are serialized as follows:
+
+<ul>
+<li> byte: XML tag &lt;ex:i1&gt;. Values: 1-byte unsigned
+integers represented in US-ASCII.
+<li> boolean: XML tag &lt;boolean&gt;. Values: "0" or "1".
+<li> int: XML tags &lt;i4&gt; or &lt;int&gt;. Values: 4-byte
+signed integers represented in US-ASCII.
+<li> long: XML tag &lt;ex:i8&gt;. Values: 8-byte signed integers
+represented in US-ASCII.
+<li> float: XML tag &lt;ex:float&gt;. Values: Single precision
+floating point numbers represented in US-ASCII.
+<li> double: XML tag &lt;double&gt;. Values: Double precision
+floating point numbers represented in US-ASCII.
+<li> ustring: XML tag &lt;string&gt;. Values: String values
+represented as UTF-8. XML does not permit all Unicode characters in literal
+data. In particular, NULLs and control chars are not allowed. Additionally,
+XML processors are required to replace carriage returns with line feeds and to
+replace CRLF sequences with line feeds. Programming languages that we work
+with do not impose these restrictions on string types. To work around these
+restrictions, disallowed characters and CRs are percent escaped in strings.
+The '%' character is also percent escaped.
+<li> buffer: XML tag &lt;string&gt;. Values: Arbitrary binary
+data. Represented as hexBinary, each byte is replaced by its 2-byte
+hexadecimal representation.
+</ul>
+
+Composite types are serialized as follows:
+
+<ul>
+<li> class: XML tag &lt;struct&gt;. A struct is a sequence of
+&lt;member&gt; elements. Each &lt;member&gt; element has a &lt;name&gt;
+element and a &lt;value&gt; element. The &lt;name&gt; is a string that must
+match /[a-zA-Z][a-zA-Z0-9_]*/. The value of the member is represented
+by a &lt;value&gt; element.
+
+<li> vector: XML tag &lt;array&gt;. An &lt;array&gt; contains a
+single &lt;data&gt; element. The &lt;data&gt; element is a sequence of
+&lt;value&gt; elements each of which represents an element of the vector.
+
+<li> map: XML tag &lt;array&gt;. Same as vector.
+
+</ul>
+
+For example:
+
+<pre><code>
+class {
+ int MY_INT; // value 5
+ vector<float> MY_VEC; // values 0.1, -0.89, 2.45e4
+ buffer MY_BUF; // value '\00\n\tabc%'
+}
+</code></pre>
+
+is serialized as
+
+<pre><code class="XML">
+&lt;value&gt;
+ &lt;struct&gt;
+ &lt;member&gt;
+ &lt;name&gt;MY_INT&lt;/name&gt;
+ &lt;value&gt;&lt;i4&gt;5&lt;/i4&gt;&lt;/value&gt;
+ &lt;/member&gt;
+ &lt;member&gt;
+ &lt;name&gt;MY_VEC&lt;/name&gt;
+ &lt;value&gt;
+ &lt;array&gt;
+ &lt;data&gt;
+ &lt;value&gt;&lt;ex:float&gt;0.1&lt;/ex:float&gt;&lt;/value&gt;
+ &lt;value&gt;&lt;ex:float&gt;-0.89&lt;/ex:float&gt;&lt;/value&gt;
+ &lt;value&gt;&lt;ex:float&gt;2.45e4&lt;/ex:float&gt;&lt;/value&gt;
+ &lt;/data&gt;
+ &lt;/array&gt;
+ &lt;/value&gt;
+ &lt;/member&gt;
+ &lt;member&gt;
+ &lt;name&gt;MY_BUF&lt;/name&gt;
+ &lt;value&gt;&lt;string&gt;%00\n\tabc%25&lt;/string&gt;&lt;/value&gt;
+ &lt;/member&gt;
+ &lt;/struct&gt;
+&lt;/value&gt;
+</code></pre>]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.record.compiler">
+ <!-- start class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <class name="CodeBuffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A wrapper around StringBuffer that automatically does indentation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.Consts -->
+ <class name="Consts" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="RIO_PREFIX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_VAR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER_FIELDS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_OUTPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_INPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TAG" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Constant definitions for the Record I/O compiler.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.Consts -->
+ <!-- start class org.apache.hadoop.record.compiler.JBoolean -->
+ <class name="JBoolean" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBoolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBoolean]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBoolean -->
+ <!-- start class org.apache.hadoop.record.compiler.JBuffer -->
+ <class name="JBuffer" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBuffer]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "buffer" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.JByte -->
+ <class name="JByte" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JByte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "byte" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JByte -->
+ <!-- start class org.apache.hadoop.record.compiler.JDouble -->
+ <class name="JDouble" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JDouble"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JDouble]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JDouble -->
+ <!-- start class org.apache.hadoop.record.compiler.JField -->
+ <class name="JField" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JField" type="java.lang.String, T"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JField]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[A thin wrapper around a record field.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JField -->
+ <!-- start class org.apache.hadoop.record.compiler.JFile -->
+ <class name="JFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFile" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JFile&gt;, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFile
+
+ @param name possibly full pathname to the file
+ @param inclFiles included files (as JFile)
+ @param recList List of records defined within this file]]>
+ </doc>
+ </constructor>
+ <method name="genCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <param name="destDir" type="java.lang.String"/>
+ <param name="options" type="java.util.ArrayList&lt;java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate record code in the given language. The language name
+ should be all lowercase.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Container for the Hadoop Record DDL.
+ The main components of the file are the filename, the list of included
+ files, and the records defined in that file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFile -->
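+ <!-- Illustrative use of JFile.genCode (a minimal sketch; a JFile is normally
+ produced by the generated Rcc parser rather than built by hand):
+
+ JFile jfile = parser.Input();   // "parser" is a hypothetical Rcc instance
+ // generate Java sources into "gensrc" with no extra options;
+ // genCode returns an int status and may throw IOException
+ int ret = jfile.genCode("java", "gensrc", new java.util.ArrayList<String>());
+ if (ret != 0) { /* code generation failed */ }
+ -->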
+ <!-- start class org.apache.hadoop.record.compiler.JFloat -->
+ <class name="JFloat" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFloat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFloat]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFloat -->
+ <!-- start class org.apache.hadoop.record.compiler.JInt -->
+ <class name="JInt" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JInt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JInt]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "int" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JInt -->
+ <!-- start class org.apache.hadoop.record.compiler.JLong -->
+ <class name="JLong" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JLong"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JLong]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "long" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JLong -->
+ <!-- start class org.apache.hadoop.record.compiler.JMap -->
+ <class name="JMap" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JMap" type="org.apache.hadoop.record.compiler.JType, org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JMap]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JMap -->
+ <!-- start class org.apache.hadoop.record.compiler.JRecord -->
+ <class name="JRecord" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JRecord" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JRecord]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JRecord -->
+ <!-- start class org.apache.hadoop.record.compiler.JString -->
+ <class name="JString" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JString"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JString]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JString -->
+ <!-- start class org.apache.hadoop.record.compiler.JType -->
+ <class name="JType" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Abstract base class for all types supported by Hadoop Record I/O.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JType -->
+ <!-- start class org.apache.hadoop.record.compiler.JVector -->
+ <class name="JVector" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JVector" type="org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JVector]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JVector -->
+ <doc>
+ <![CDATA[This package contains classes needed for code generation
+ from the Hadoop record compiler. CppGenerator and JavaGenerator
+ are the main entry points from the parser. There are classes
+ corresponding to every primitive type and compound type
+ included in Hadoop record I/O syntax.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.record.compiler.ant">
+ <!-- start class org.apache.hadoop.record.compiler.ant.RccTask -->
+ <class name="RccTask" extends="org.apache.tools.ant.Task"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RccTask"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of RccTask]]>
+ </doc>
+ </constructor>
+ <method name="setLanguage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the output language option
+ @param language "java"/"c++"]]>
+ </doc>
+ </method>
+ <method name="setFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets the record definition file attribute
+ @param file record definition file]]>
+ </doc>
+ </method>
+ <method name="setFailonerror"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="flag" type="boolean"/>
+ <doc>
+ <![CDATA[Given multiple files (via fileset), sets the error handling behavior
+ @param flag if true, throw a build exception in case of failure (default)]]>
+ </doc>
+ </method>
+ <method name="setDestdir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets directory where output files will be generated
+ @param dir output directory]]>
+ </doc>
+ </method>
+ <method name="addFileset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="set" type="org.apache.tools.ant.types.FileSet"/>
+ <doc>
+ <![CDATA[Adds a fileset that can consist of one or more files
+ @param set Set of record definition files]]>
+ </doc>
+ </method>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="BuildException" type="org.apache.tools.ant.BuildException"/>
+ <doc>
+ <![CDATA[Invoke the Hadoop record compiler on each record definition file]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Hadoop record compiler ant Task
+<p> This task takes the given record definition files and compiles them into
+ java or c++
+ files. It is then up to the user to compile the generated files.
+
+ <p> The task requires the <code>file</code> or the nested fileset element to be
+ specified. Optional attributes are <code>language</code> (set the output
+ language, default is "java"),
+ <code>destdir</code> (name of the destination directory for generated java/c++
+ code, default is ".") and <code>failonerror</code> (specifies error handling
+ behavior. default is true).
+ <p><h4>Usage</h4>
+ <pre>
+ &lt;recordcc
+ destdir="${basedir}/gensrc"
+ language="java"&gt;
+ &lt;fileset include="**\/*.jr" /&gt;
+ &lt;/recordcc&gt;
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.ant.RccTask -->
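+ <!-- Before the <recordcc> usage above will work, the task has to be
+ registered with Ant via the standard taskdef mechanism; a minimal sketch
+ (the task name "recordcc" and the "${hadoop.jar}" path are placeholders
+ chosen by the build file author):
+
+ <taskdef name="recordcc"
+ classname="org.apache.hadoop.record.compiler.ant.RccTask"
+ classpath="${hadoop.jar}"/>
+ -->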
+</package>
+<package name="org.apache.hadoop.record.compiler.generated">
+ <!-- start class org.apache.hadoop.record.compiler.generated.ParseException -->
+ <class name="ParseException" extends="java.lang.Exception"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ParseException" type="org.apache.hadoop.record.compiler.generated.Token, int[][], java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constructor is used by the method "generateParseException"
+ in the generated parser. Calling this constructor generates
+ a new object of this type with the fields "currentToken",
+ "expectedTokenSequences", and "tokenImage" set. The boolean
+ flag "specialConstructor" is also set to true to indicate that
+ this constructor was used to create this object.
+ This constructor calls its super class with the empty string
+ to force the "toString" method of parent class "Throwable" to
+ print the error message in the form:
+ ParseException: <result of getMessage>]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The following constructors are for use by you for whatever
+ purpose you can think of. Constructing the exception in this
+ manner makes the exception behave in the normal way - i.e., as
+ documented in the class "Throwable". The fields "errorToken",
+ "expectedTokenSequences", and "tokenImage" do not contain
+ relevant information. The JavaCC generated code does not use
+ these constructors.]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method has the standard behavior when this object has been
+ created using the standard constructors. Otherwise, it uses
+ "currentToken" and "expectedTokenSequences" to generate a parse
+ error message and returns it. If this object has been created
+ due to a parse error, and you do not catch it (it gets thrown
+ from the parser), then this method is called during the printing
+ of the final stack trace, and hence the correct error message
+ gets displayed.]]>
+ </doc>
+ </method>
+ <method name="add_escapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Used to convert raw characters to their escaped version
+ when these raw versions cannot be used as part of an ASCII
+ string literal.]]>
+ </doc>
+ </method>
+ <field name="specialConstructor" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This variable determines which constructor was used to create
+ this object and thereby affects the semantics of the
+ "getMessage" method (see below).]]>
+ </doc>
+ </field>
+ <field name="currentToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is the last token that has been consumed successfully. If
+ this object has been created due to a parse error, the token
+ following this token will (therefore) be the first error token.]]>
+ </doc>
+ </field>
+ <field name="expectedTokenSequences" type="int[][]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Each entry in this array is an array of integers. Each array
+ of integers represents a sequence of tokens (by their ordinal
+ values) that is expected at this point of the parse.]]>
+ </doc>
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is a reference to the "tokenImage" array of the generated
+ parser within which the parse error occurred. This array is
+ defined in the generated ...Constants interface.]]>
+ </doc>
+ </field>
+ <field name="eol" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The end of line string for this machine.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This exception is thrown when parse errors are encountered.
+ You can explicitly create objects of this exception type by
+ calling the method generateParseException in the generated
+ parser.
+
+ You can modify this class to customize your error reporting
+ mechanisms so long as you retain the public fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.ParseException -->
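+ <!-- Typical handling of ParseException (a sketch; "parser" stands for a
+ hypothetical instance of the Rcc class below):
+
+ try {
+ JFile jfile = parser.Input();
+ } catch (ParseException e) {
+ // when thrown by the parser, getMessage() formats currentToken and
+ // expectedTokenSequences into a readable parse error message
+ System.err.println(e.getMessage());
+ }
+ -->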
+ <!-- start class org.apache.hadoop.record.compiler.generated.Rcc -->
+ <class name="Rcc" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="Rcc" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="usage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="driver" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="Input" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Include" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Module" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ModuleName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="RecordList" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Record" return="org.apache.hadoop.record.compiler.JRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Field" return="org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Type" return="org.apache.hadoop.record.compiler.JType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Map" return="org.apache.hadoop.record.compiler.JMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Vector" return="org.apache.hadoop.record.compiler.JVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tm" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"/>
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <method name="generateParseException" return="org.apache.hadoop.record.compiler.generated.ParseException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="enable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="disable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="token_source" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="token" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jj_nt" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Rcc -->
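+ <!-- Programmatic use of the generated parser (a minimal sketch;
+ "mytypes.jr" is a hypothetical record definition file):
+
+ Rcc parser = new Rcc(new java.io.FileReader("mytypes.jr"));
+ JFile jfile = parser.Input();   // throws ParseException on bad input
+ jfile.genCode("java", ".", new java.util.ArrayList<String>());
+
+ The same compilation can be driven from the command line through
+ main()/driver(), which is what the bundled bin/rcc script invokes.
+ -->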
+ <!-- start interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <interface name="RccConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="EOF" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MODULE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCLUDE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BOOLEAN_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SEMICOLON_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CSTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IDENT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinOneLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinMultiLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
+ <class name="RccTokenManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setDebugStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ds" type="java.io.PrintStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="SwitchTo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="jjFillToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="debugStream" type="java.io.PrintStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjstrLiteralImages" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="lexStateNames" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjnewLexState" type="int[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="input_stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="curChar" type="char"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
+ <class name="SimpleCharStream" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setTabSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="getTabSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="ExpandBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="wrapAround" type="boolean"/>
+ </method>
+ <method name="FillBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="BeginToken" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="UpdateLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="c" type="char"/>
+ </method>
+ <method name="readChar" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getEndColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getEndLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="backup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="amount" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="GetImage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="GetSuffix" return="char[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="int"/>
+ </method>
+ <method name="Done"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="adjustBeginLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newLine" type="int"/>
+ <param name="newCol" type="int"/>
+ <doc>
+ <![CDATA[Method to adjust line and column numbers for the start of a token.]]>
+ </doc>
+ </method>
+ <field name="staticFlag" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufpos" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufline" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufcolumn" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="column" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="line" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsCR" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsLF" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputStream" type="java.io.Reader"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="buffer" type="char[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="maxNextCharInd" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inBuf" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="tabSize" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of interface CharStream, where the stream is assumed to
+ contain only ASCII characters (without unicode processing).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.Token -->
+ <class name="Token" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Token"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the image.]]>
+ </doc>
+ </method>
+ <method name="newToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="ofKind" type="int"/>
+ <doc>
+ <![CDATA[Returns a new Token object, by default. However, if you want, you
+ can create and return subclass objects based on the value of ofKind.
+ Simply add the cases to the switch for all those special cases.
+ For example, if you have a subclass of Token called IDToken that
+ you want to create if ofKind is ID, simply add something like:
+
+ case MyParserConstants.ID : return new IDToken();
+
+ to the following switch statement. Then you can cast matchedToken
+ variable to the appropriate type and use it in your lexical actions.]]>
+ </doc>
+ </method>
+ <field name="kind" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[An integer that describes the kind of this token. This numbering
+ system is determined by JavaCCParser, and a table of these numbers is
+ stored in the file ...Constants.java.]]>
+ </doc>
+ </field>
+ <field name="beginLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="beginColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="image" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The string image of the token.]]>
+ </doc>
+ </field>
+ <field name="next" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A reference to the next regular (non-special) token from the input
+ stream. If this is the last token from the input stream, or if the
+ token manager has not read tokens beyond this one, this field is
+ set to null. This is true only if this token is also a regular
+ token. Otherwise, see below for a description of the contents of
+ this field.]]>
+ </doc>
+ </field>
+ <field name="specialToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This field is used to access special tokens that occur prior to this
+ token, but after the immediately preceding regular (non-special) token.
+ If there are no such special tokens, this field is set to null.
+ When there is more than one such special token, this field refers
+ to the last of these special tokens, which in turn refers to the next
+ previous special token through its specialToken field, and so on
+ until the first special token (whose specialToken field is null).
+ The next fields of special tokens refer to other special tokens that
+ immediately follow it (without an intervening regular token). If there
+ is no such token, this field is null.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Describes the input token stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Token -->
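+ <!-- Sketch of the newToken customization described above (IDToken is a
+ hypothetical Token subclass, used only for illustration):
+
+ public static final Token newToken(int ofKind) {
+ switch (ofKind) {
+ case RccConstants.IDENT_TKN: return new IDToken();  // hypothetical subclass
+ default: return new Token();
+ }
+ }
+ -->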
+ <!-- start class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
+ <class name="TokenMgrError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TokenMgrError"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="boolean, int, int, int, java.lang.String, char, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addEscapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Replaces unprintable characters by their escaped (or unicode escaped)
+ equivalents in the given string.]]>
+ </doc>
+ </method>
+ <method name="LexicalError" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="EOFSeen" type="boolean"/>
+ <param name="lexState" type="int"/>
+ <param name="errorLine" type="int"/>
+ <param name="errorColumn" type="int"/>
+ <param name="errorAfter" type="java.lang.String"/>
+ <param name="curChar" type="char"/>
+ <doc>
+ <![CDATA[Returns a detailed message for the Error when it is thrown by the
+ token manager to indicate a lexical error.
+ Parameters:
+ EOFSeen : indicates if EOF caused the lexical error
+ lexState : lexical state in which this error occurred
+ errorLine : line number where the error occurred
+ errorColumn : column number where the error occurred
+ errorAfter : prefix that was seen before this error occurred
+ curChar : the offending character
+ Note: You can customize the lexical error message by modifying this method.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[You can also modify the body of this method to customize your error messages.
+ For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not
+ of concern to end users, so you can return something like:
+
+ "Internal Error : Please file a bug report .... "
+
+ from this method for such cases in the release version of your parser.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
+ <doc>
+ <![CDATA[This package contains code generated by JavaCC from the
+ Hadoop record syntax file rcc.jj. For details about the
+ record file syntax, see the org.apache.hadoop.record package.]]>
+ </doc>
+</package>
+<package name="org.apache.hadoop.record.meta">
+ <!-- start class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <class name="FieldTypeInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's TypeID object]]>
+ </doc>
+ </method>
+ <method name="getFieldID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's id (name)]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two FieldTypeInfos are equal if each of their fields matches.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ti" type="org.apache.hadoop.record.meta.FieldTypeInfo"/>
+ </method>
+ <doc>
+ <![CDATA[Represents a type information for a field, which is made up of its
+ ID (name) and its type (a TypeID object).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <!-- start class org.apache.hadoop.record.meta.MapTypeID -->
+ <class name="MapTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapTypeID" type="org.apache.hadoop.record.meta.TypeID, org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getKeyTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's key element]]>
+ </doc>
+ </method>
+ <method name="getValueTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's value element]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two map typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a Map]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.MapTypeID -->
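+ <!-- Composite TypeIDs nest (a sketch; VectorTypeID and the LongTypeID
+ constant are assumed to exist in this same package alongside the types
+ documented here):
+
+ // a map from byte buffers to vectors of longs (assumed constants)
+ TypeID mapType = new MapTypeID(TypeID.BufferTypeID,
+ new VectorTypeID(TypeID.LongTypeID));
+ -->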
+ <!-- start class org.apache.hadoop.record.meta.RecordTypeInfo -->
+ <class name="RecordTypeInfo" extends="org.apache.hadoop.record.Record"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty RecordTypeInfo object.]]>
+ </doc>
+ </constructor>
+ <constructor name="RecordTypeInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a RecordTypeInfo object representing a record with the given name
+ @param name Name of the record]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the name of the record]]>
+ </doc>
+ </method>
+ <method name="setName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[set the name of the record]]>
+ </doc>
+ </method>
+ <method name="addField"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fieldName" type="java.lang.String"/>
+ <param name="tid" type="org.apache.hadoop.record.meta.TypeID"/>
+ <doc>
+ <![CDATA[Add a field.
+ @param fieldName Name of the field
+ @param tid Type ID of the field]]>
+ </doc>
+ </method>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a collection of field type infos]]>
+ </doc>
+ </method>
+ <method name="getNestedStructTypeInfo" return="org.apache.hadoop.record.meta.RecordTypeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the type info of a nested record. We only consider nesting
+ to one level.
+ @param name Name of the nested record]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer_" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ <doc>
+ <![CDATA[This class doesn't implement Comparable as it's not meant to be used
+ for anything besides de/serializing, so this method is not meaningfully
+ implemented: it throws ClassCastException when the argument is not a
+ RecordTypeInfo, and otherwise always returns 0.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A record's Type Information object which can read/write itself.
+
+ Type information for a record comprises metadata about the record,
+ as well as a collection of type information for each field in the record.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.RecordTypeInfo -->
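+  <!-- A minimal usage sketch for RecordTypeInfo (editorial illustration, not part of
+       the generated API record). It assumes org.apache.hadoop.record.BinaryRecordOutput
+       as the RecordOutput implementation; the record and field names are hypothetical.
+
+       static byte[] typeInfoBytes() throws java.io.IOException {
+         RecordTypeInfo rti = new RecordTypeInfo("Employee");     // record name
+         rti.addField("name", TypeID.StringTypeID);               // field name plus a shared TypeID constant
+         rti.addField("id", TypeID.IntTypeID);
+         java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
+         rti.serialize(new BinaryRecordOutput(bytes), "Employee"); // second arg is the tag
+         return bytes.toByteArray();
+       }
+  -->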
+ <!-- start class org.apache.hadoop.record.meta.StructTypeID -->
+ <class name="StructTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StructTypeID" type="org.apache.hadoop.record.meta.RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a StructTypeID based on the RecordTypeInfo of some record]]>
+ </doc>
+ </constructor>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a struct]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.StructTypeID -->
+ <!-- start class org.apache.hadoop.record.meta.TypeID -->
+ <class name="TypeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeVal" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type value. One of the constants in RIOType.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two base typeIDs are equal if they refer to the same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <field name="BoolTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Constant instances for the basic types, so we can share them.]]>
+ </doc>
+ </field>
+ <field name="BufferTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ByteTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DoubleTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FloatTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IntTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LongTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="StringTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="typeVal" type="byte"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Represents typeID for basic types.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID -->
+ <!-- start class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <class name="TypeID.RIOType" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TypeID.RIOType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <field name="BOOL" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRUCT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[constants representing the IDL types we support]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <!-- start class org.apache.hadoop.record.meta.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <param name="typeID" type="org.apache.hadoop.record.meta.TypeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[read/skip bytes from stream based on a type]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Various utility functions for the Hadoop record I/O platform.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.Utils -->
+ <!-- start class org.apache.hadoop.record.meta.VectorTypeID -->
+ <class name="VectorTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VectorTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getElementTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two vector typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for vector.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.VectorTypeID -->
+</package>
+<package name="org.apache.hadoop.security">
+ <!-- start class org.apache.hadoop.security.UnixUserGroupInformation -->
+ <class name="UnixUserGroupInformation" extends="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnixUserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameters user name and its group names.
+ The first entry in the groups list is the default group.
+
+ @param userName a user's name
+ @param groupNames groups list, first of which is the default group
+ @exception IllegalArgumentException if any argument is null]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameter user/group names
+
+ @param ugi an array containing user/group names, the first
+ element of which is the user name, the second of
+ which is the default group name.
+ @exception IllegalArgumentException if the array size is less than 2
+ or any element is null.]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Create an immutable {@link UnixUserGroupInformation} object.]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an array of group names]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the user's name]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Deserialize this object.
+ First check that this is a UGI in the string format;
+ if not, throw an IOException. Otherwise,
+ set this object's fields by reading them from the given data input.
+
+ @param in input stream
+ @exception IOException if any error is encountered while reading]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Serialize this object.
+ First write a string marking that this is a UGI in the string format,
+ then write this object's serialized form to the given data output.
+
+ @param out output stream
+ @exception IOException if any error is encountered while writing]]>
+ </doc>
+ </method>
+ <method name="saveToConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <param name="ugi" type="org.apache.hadoop.security.UnixUserGroupInformation"/>
+ <doc>
+      <![CDATA[Store the given <code>ugi</code> as a comma separated string in
+ <code>conf</code> as the property <code>attr</code>.
+
+ The String starts with the user name followed by the default group name
+ and then the other group names.
+
+ @param conf configuration
+ @param attr property name
+ @param ugi a UnixUserGroupInformation]]>
+ </doc>
+ </method>
+ <method name="readFromConf" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+      <![CDATA[Read a UGI from the given <code>conf</code>.
+
+ The object is expected to be stored under the property name <code>attr</code>
+ as a comma separated string that starts
+ with the user name followed by group names.
+ If the property name is not defined, return null.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the ugi in the map.
+ Otherwise, construct a UGI from the configuration, store it in the
+ ugi map and return it.
+
+ @param conf configuration
+ @param attr property name
+ @return a UnixUGI
+ @throws LoginException if the stored string is ill-formatted.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Get current user's name and the names of all its groups from Unix.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the ugi in the map.
+ Otherwise get the current user's information from Unix, store it
+ in the map, and return it.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Equivalent to login(conf, false).]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="save" type="boolean"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+      <![CDATA[Get a user's name and group names from the given configuration;
+ if they are not defined in the configuration, get the current user's
+ information from Unix.
+ If the user already has a UGI in the ugi map, return the one in
+ the UGI map.
+
+ @param conf either a job configuration or client's configuration
+ @param save whether to save the resulting UGI back into conf
+ @return UnixUserGroupInformation a user/group information
+ @exception LoginException if not able to get the user/group information]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Decide if two UGIs are the same
+
+ @param other other object
+ @return true if they are the same; false otherwise.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code for this UGI.
+ The hash code for a UGI is the hash code of its user name string.
+
+ @return a hash code value for this UGI.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this object to a string
+
+ @return a comma separated string containing the user name and group names]]>
+ </doc>
+ </method>
+ <field name="UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of UserGroupInformation in the Unix system]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UnixUserGroupInformation -->
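+  <!-- A minimal sketch of the login/saveToConf/readFromConf round trip described
+       above (editorial illustration). Both login and readFromConf throw
+       javax.security.auth.login.LoginException.
+
+       Configuration conf = new Configuration();
+       UnixUserGroupInformation ugi = UnixUserGroupInformation.login(conf); // cached per user
+       UnixUserGroupInformation.saveToConf(conf, UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
+       // Later, e.g. on the task side, the same UGI can be recovered from the conf:
+       UnixUserGroupInformation same =
+           UnixUserGroupInformation.readFromConf(conf, UnixUserGroupInformation.UGI_PROPERTY_NAME);
+  -->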
+ <!-- start class org.apache.hadoop.security.UserGroupInformation -->
+ <class name="UserGroupInformation" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="UserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCurrentUGI" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="setCurrentUGI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <doc>
+ <![CDATA[Set the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get username
+
+ @return the user's name]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the names of the groups that the user belongs to
+
+ @return an array of group names]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Login and return a UserGroupInformation object.]]>
+ </doc>
+ </method>
+ <method name="readFrom" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link UserGroupInformation} from conf]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Writable} abstract class for storing user and groups information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UserGroupInformation -->
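+  <!-- A short sketch of binding a UGI to the current thread via the static
+       accessors listed above (editorial illustration).
+
+       Configuration conf = new Configuration();
+       UserGroupInformation ugi = UserGroupInformation.login(conf); // throws LoginException
+       UserGroupInformation.setCurrentUGI(ugi);                     // bind to the current thread
+       String user = UserGroupInformation.getCurrentUGI().getUserName();
+  -->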
+</package>
+<package name="org.apache.hadoop.tools">
+ <!-- start class org.apache.hadoop.tools.Logalyzer -->
+ <class name="Logalyzer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Logalyzer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doArchive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logListURI" type="java.lang.String"/>
+ <param name="archiveDirectory" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doArchive: Workhorse function to archive log-files.
+ @param logListURI : The uri which will serve the list of log-files to archive.
+ @param archiveDirectory : The directory to store archived logfiles.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="doAnalyze"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFilesDirectory" type="java.lang.String"/>
+ <param name="outputDirectory" type="java.lang.String"/>
+ <param name="grepPattern" type="java.lang.String"/>
+ <param name="sortColumns" type="java.lang.String"/>
+ <param name="columnSeparator" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doAnalyze:
+ @param inputFilesDirectory : Directory containing the files to be analyzed.
+ @param outputDirectory : Directory to store analysis (output).
+ @param grepPattern : Pattern to *grep* for.
+ @param sortColumns : Sort specification for output.
+ @param columnSeparator : Column separator.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[Logalyzer: A utility tool for archiving and analyzing hadoop logs.
+ <p>
+ This tool supports archiving and analyzing (sort/grep) of log-files.
+ It takes as input
+ a) an input uri which will serve uris of the logs to be archived,
+ b) an output directory (not mandatory),
+ c) a directory on dfs to archive the logs, and
+ d) the sort/grep patterns for analyzing the files and a separator for boundaries.
+ Usage:
+ Logalyzer -archive -archiveDir <directory to archive logs> -analysis <directory> -logs <log-list uri> -grep <pattern> -sort <col1, col2> -separator <separator>
+ <p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <class name="Logalyzer.LogComparator" extends="org.apache.hadoop.io.Text.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Logalyzer.LogComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys of the logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+ <class name="Logalyzer.LogRegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="Logalyzer.LogRegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+</package>
+<package name="org.apache.hadoop.util">
+ <!-- start class org.apache.hadoop.util.CopyFiles -->
+ <class name="CopyFiles" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="CopyFiles" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="destPath" type="java.lang.String"/>
+ <param name="logPath" type="org.apache.hadoop.fs.Path"/>
+ <param name="srcAsList" type="boolean"/>
+ <param name="ignoreReadFailures" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This is the main driver for recursively copying directories
+ across file systems. It takes at least two cmdline parameters: a source
+ URL and a destination URL. It then essentially does an "ls -lR" on the
+ source URL, and writes the output in a round-robin manner to all the map
+ input files. The mapper actually copies the files allotted to it. The
+ reduce is empty.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[A Map-reduce program to recursively copy directories between
+ different file-systems.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.CopyFiles -->
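+  <!-- A hedged sketch of driving CopyFiles programmatically (editorial illustration).
+       The URIs and log path are hypothetical, and reading srcAsList=false as
+       "treat srcPath as a path rather than a file listing sources" is an assumption
+       based only on the parameter name.
+
+       Configuration conf = new Configuration();
+       CopyFiles.copy(conf, "hdfs://nn1:8020/src", "hdfs://nn2:8020/dest",
+                      new Path("hdfs://nn2:8020/copylogs"), false, false); // throws IOException
+       // Or run it as a Tool with the two required cmdline parameters:
+       int rc = ToolRunner.run(new CopyFiles(conf),
+                               new String[] { "hdfs://nn1:8020/src", "hdfs://nn2:8020/dest" });
+  -->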
+ <!-- start class org.apache.hadoop.util.CopyFiles.DuplicationException -->
+ <class name="CopyFiles.DuplicationException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="ERROR_CODE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Error code for this exception]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An exception class for duplicated source files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.CopyFiles.DuplicationException -->
+ <!-- start class org.apache.hadoop.util.Daemon -->
+ <class name="Daemon" extends="java.lang.Thread"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Daemon"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.ThreadGroup, java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread to be part of a specified thread group.]]>
+ </doc>
+ </constructor>
+ <method name="getRunnable" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+      <![CDATA[A thread that has called {@link Thread#setDaemon(boolean)} with true.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Daemon -->
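+  <!-- A tiny sketch (editorial illustration): Daemon is a Thread pre-marked as a
+       daemon, so the JVM can exit without waiting for it.
+
+       Daemon heartbeat = new Daemon(new Runnable() {
+         public void run() {
+           while (true) { /* periodic background work */ }
+         }
+       });
+       heartbeat.start(); // start() is inherited from java.lang.Thread
+  -->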
+ <!-- start class org.apache.hadoop.util.DiskChecker -->
+ <class name="DiskChecker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mkdirsWithExistsCheck" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+      <![CDATA[The semantics of the mkdirsWithExistsCheck method differ from those of the
+ mkdirs method provided by Sun's java.io.File class in the following way:
+ while creating the non-existent parent directories, this method checks for
+ the existence of those directories if the mkdir fails at any point (since
+ that directory might have just been created by some other process).
+ If both mkdir() and the exists() check fail for any seemingly
+ non-existent directory, then we signal an error; Sun's mkdir would signal
+ an error (return false) if a directory it is attempting to create already
+ exists or the mkdir fails.
+ @param dir
+ @return true on success, false on failure]]>
+ </doc>
+ </method>
+ <method name="checkDir"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ </method>
+ <doc>
+      <![CDATA[Class that provides utility functions for checking disk problems.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker -->
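+  <!-- A minimal sketch of the two entry points above (editorial illustration).
+       The directory path is hypothetical, and the exact conditions checkDir tests
+       beyond "disk problems" are an assumption.
+
+       java.io.File dataDir = new java.io.File("/tmp/hadoop-data");
+       try {
+         DiskChecker.checkDir(dataDir); // throws DiskErrorException on a bad directory
+       } catch (DiskChecker.DiskErrorException e) {
+         System.err.println("unusable dir: " + e.getMessage());
+       }
+       boolean ok = DiskChecker.mkdirsWithExistsCheck(new java.io.File(dataDir, "tmp"));
+  -->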
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <class name="DiskChecker.DiskErrorException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskErrorException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <class name="DiskChecker.DiskOutOfSpaceException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskOutOfSpaceException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <!-- start class org.apache.hadoop.util.GenericOptionsParser -->
+ <class name="GenericOptionsParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Create a <code>GenericOptionsParser</code> to parse only the generic Hadoop
+ arguments.
+
+ The array of string arguments other than the generic arguments can be
+ obtained by {@link #getRemainingArgs()}.
+
+ @param conf the <code>Configuration</code> to modify.
+ @param args command-line arguments.]]>
+ </doc>
+ </constructor>
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, org.apache.commons.cli.Options, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a <code>GenericOptionsParser</code> to parse given options as well
+ as generic Hadoop options.
+
+ The resulting <code>CommandLine</code> object can be obtained by
+ {@link #getCommandLine()}.
+
+ @param conf the configuration to modify
+ @param options options built by the caller
+ @param args User-specified arguments]]>
+ </doc>
+ </constructor>
+ <method name="getRemainingArgs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array of Strings containing only application-specific arguments.
+
+ @return array of <code>String</code>s containing the un-parsed arguments.]]>
+ </doc>
+ </method>
+ <method name="getCommandLine" return="org.apache.commons.cli.CommandLine"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the commons-cli <code>CommandLine</code> object
+ to process the parsed arguments.
+
+ Note: If the object is created with
+ {@link #GenericOptionsParser(Configuration, String[])}, then returned
+ object will only contain parsed generic options.
+
+ @return <code>CommandLine</code> representing list of arguments
+ parsed against Options descriptor.]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Print the usage message for generic command-line options supported.
+
+ @param out stream to print the usage message to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>GenericOptionsParser</code> is a utility to parse command line
+ arguments generic to the Hadoop framework.
+
+ <code>GenericOptionsParser</code> recognizes several standard command
+ line arguments, enabling applications to easily specify a namenode, a
+ jobtracker, additional configuration resources etc.
+
+ <h4 id="GenericOptions">Generic Options</h4>
+
+ <p>The supported generic options are:</p>
+ <p><blockquote><pre>
+ -conf &lt;configuration file&gt; specify a configuration file
+ -D &lt;property=value&gt; use value for given property
+ -fs &lt;local|namenode:port&gt; specify a namenode
+ -jt &lt;local|jobtracker:port&gt; specify a job tracker
+ </pre></blockquote></p>
+
+ <p>The general command line syntax is:</p>
+ <p><tt><pre>
+ bin/hadoop command [genericOptions] [commandOptions]
+ </pre></tt></p>
+
+ <p>Generic command line arguments <strong>might</strong> modify
+ <code>Configuration</code> objects given to constructors.</p>
+
+ <p>The functionality is implemented using Commons CLI.</p>
+
+ <p>Examples:</p>
+ <p><blockquote><pre>
+ $ bin/hadoop dfs -fs darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -conf hadoop-site.xml -ls /data
+ list /data directory in dfs with conf specified in hadoop-site.xml
+
+ $ bin/hadoop job -D mapred.job.tracker=darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt local -submit job.xml
+ submit a job to local runner
+ </pre></blockquote></p>
+
+ @see Tool
+ @see ToolRunner]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericOptionsParser -->
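+  <!-- A minimal sketch of the two-argument constructor described above
+       (editorial illustration).
+
+       public static void main(String[] args) throws Exception {
+         Configuration conf = new Configuration();
+         // Consumes -conf/-D/-fs/-jt from args and applies them to conf.
+         GenericOptionsParser parser = new GenericOptionsParser(conf, args);
+         String[] appArgs = parser.getRemainingArgs(); // application-specific args only
+       }
+  -->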
+ <!-- start class org.apache.hadoop.util.GenericsUtil -->
+ <class name="GenericsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericsUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClass" return="java.lang.Class&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <doc>
+ <![CDATA[Returns the Class object (of type <code>Class&lt;T&gt;</code>) of the
+ argument of type <code>T</code>.
+ @param <T> The type of the argument
+ @param t the object whose class is to be returned
+ @return <code>Class&lt;T&gt;</code>]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+      <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param c the Class object of the items in the list
+ @param list the list to convert]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+      <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param list the list to convert
+ @throws ArrayIndexOutOfBoundsException if the list is empty.
+ Use {@link #toArray(Class, List)} if the list may be empty.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Contains utility methods for dealing with Java Generics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericsUtil -->
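+  <!-- A short sketch of the three helpers above (editorial illustration).
+
+       java.util.List<String> names = java.util.Arrays.asList("a", "b");
+       String[] asArray  = GenericsUtil.toArray(String.class, names); // safe for empty lists
+       String[] asArray2 = GenericsUtil.toArray(names);               // throws if names is empty
+       Class<String> c = GenericsUtil.getClass("hello");              // Class<T> of a T instance
+  -->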
+ <!-- start class org.apache.hadoop.util.HostsFileReader -->
+ <class name="HostsFileReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HostsFileReader" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="refresh"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExcludedHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.util.HostsFileReader -->
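+  <!-- A hedged sketch (editorial illustration). The file paths are hypothetical,
+       and the assumption that the first constructor argument is the includes file
+       and the second the excludes file is inferred from getHosts/getExcludedHosts.
+
+       HostsFileReader hosts = new HostsFileReader("/etc/hadoop/hosts.include",
+                                                   "/etc/hadoop/hosts.exclude"); // throws IOException
+       java.util.Set<String> included = hosts.getHosts();
+       java.util.Set<String> excluded = hosts.getExcludedHosts();
+       hosts.refresh(); // re-read both files after they change
+  -->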
+ <!-- start interface org.apache.hadoop.util.IndexedSortable -->
+ <interface name="IndexedSortable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Compare items at the given addresses consistent with the semantics of
+ {@link java.util.Comparator#compare}.]]>
+ </doc>
+ </method>
+ <method name="swap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Swap items at the given addresses.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for collections capable of being sorted by {@link IndexedSorter}
+ algorithms.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSortable -->
+ <!-- start interface org.apache.hadoop.util.IndexedSorter -->
+ <interface name="IndexedSorter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the items accessed through the given IndexedSortable over the given
+ range of logical indices. From the perspective of the sort algorithm,
+ each index between l (inclusive) and r (exclusive) is an addressable
+ entry.
+ @see IndexedSortable#compare
+ @see IndexedSortable#swap]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for sort algorithms accepting {@link IndexedSortable} items.
+
+ A sort algorithm implementing this interface may only
+ {@link IndexedSortable#compare} and {@link IndexedSortable#swap} items
+ for a range of indices to effect a sort across that range.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSorter -->
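+  <!-- A sketch of the compare/swap contract (editorial illustration): sorting
+       parallel arrays by key with the QuickSort implementation listed later in
+       this package. Both arrays must be permuted together inside swap().
+
+       final int[] keys = { 42, 7, 19 };
+       final String[] vals = { "c", "a", "b" };
+       IndexedSortable s = new IndexedSortable() {
+         public int compare(int i, int j) {
+           return keys[i] < keys[j] ? -1 : (keys[i] == keys[j] ? 0 : 1);
+         }
+         public void swap(int i, int j) {
+           int k = keys[i];   keys[i] = keys[j]; keys[j] = k;
+           String v = vals[i]; vals[i] = vals[j]; vals[j] = v;
+         }
+       };
+       new QuickSort().sort(s, 0, keys.length); // l inclusive, r exclusive
+  -->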
+ <!-- start class org.apache.hadoop.util.MergeSort -->
+ <class name="MergeSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MergeSort" type="java.util.Comparator&lt;org.apache.hadoop.io.IntWritable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mergeSort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="int[]"/>
+ <param name="dest" type="int[]"/>
+ <param name="low" type="int"/>
+ <param name="high" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of MergeSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.MergeSort -->
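+  <!-- A hedged sketch (editorial illustration). The comparator receives the int
+       values boxed as IntWritable. That src is scratch space starting as a copy
+       and the sorted order lands in dest is an assumption, mirroring the legacy
+       java.util.Arrays merge sort this class resembles.
+
+       java.util.Comparator<IntWritable> asc = new java.util.Comparator<IntWritable>() {
+         public int compare(IntWritable a, IntWritable b) {
+           return a.get() < b.get() ? -1 : (a.get() == b.get() ? 0 : 1);
+         }
+       };
+       int[] data = { 5, 3, 8, 1 };
+       int[] work = data.clone();
+       new MergeSort(asc).mergeSort(work, data, 0, data.length); // data ends sorted
+  -->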
+ <!-- start class org.apache.hadoop.util.NativeCodeLoader -->
+ <class name="NativeCodeLoader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeCodeLoader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeCodeLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if native-hadoop code is loaded for this platform.
+
+ @return <code>true</code> if native-hadoop is loaded,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getLoadNativeLibraries" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+      <![CDATA[Return whether native hadoop libraries, if present, can be used for this job.
+ @param jobConf job configuration
+
+ @return <code>true</code> if native hadoop libraries, if present, can be
+ used for this job; <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setLoadNativeLibraries"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="loadNativeLibraries" type="boolean"/>
+ <doc>
+      <![CDATA[Set whether native hadoop libraries, if present, can be used for this job.
+
+ @param jobConf job configuration
+ @param loadNativeLibraries can native hadoop libraries be loaded]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[A helper to load the native hadoop code, i.e. libhadoop.so.
+ This handles the fallback to either the bundled libhadoop-Linux-i386-32.so
+ or the default java implementations where appropriate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.NativeCodeLoader -->
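+  <!-- A small sketch (editorial illustration): check for the native library and
+       opt a job into using it. Note setLoadNativeLibraries is an instance method
+       per the record above.
+
+       if (NativeCodeLoader.isNativeCodeLoaded()) {
+         JobConf job = new JobConf();
+         new NativeCodeLoader().setLoadNativeLibraries(job, true);
+       }
+  -->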
+ <!-- start class org.apache.hadoop.util.PlatformName -->
+ <class name="PlatformName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PlatformName"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPlatformName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the complete platform name as per the java-vm.
+ @return the complete platform name as per the java-vm.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[A helper class for getting build-info of the java-vm.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PlatformName -->
+ <!-- start class org.apache.hadoop.util.PrintJarMainClass -->
+ <class name="PrintJarMainClass" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PrintJarMainClass"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A micro-application that prints the main class name out of a jar file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PrintJarMainClass -->
+ <!-- start class org.apache.hadoop.util.PriorityQueue -->
+ <class name="PriorityQueue" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PriorityQueue"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="lessThan" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Determines the ordering of objects in this priority queue. Subclasses
+ must define this one method.]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="maxSize" type="int"/>
+ <doc>
+ <![CDATA[Subclass constructors must call this.]]>
+ </doc>
+ </method>
+ <method name="put"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Adds an Object to a PriorityQueue in log(size) time.
+ If one tries to add more objects than the maxSize passed to initialize,
+ a RuntimeException (ArrayIndexOutOfBoundsException) is thrown.]]>
+ </doc>
+ </method>
+ <method name="insert" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Adds element to the PriorityQueue in log(size) time if either
+ the PriorityQueue is not full, or not lessThan(element, top()).
+ @param element
+ @return true if element is added, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="top" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the least element of the PriorityQueue in constant time.]]>
+ </doc>
+ </method>
+ <method name="pop" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes and returns the least element of the PriorityQueue in log(size)
+ time.]]>
+ </doc>
+ </method>
+ <method name="adjustTop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should be called when the Object at top changes values. Still log(n)
+ worst case, but it's at least twice as fast to do <pre>
+ { pq.top().change(); pq.adjustTop(); }
+ </pre> rather than <pre>
+ { o = pq.pop(); o.change(); pq.put(o); }
+ </pre>]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of elements currently stored in the PriorityQueue.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes all entries from the PriorityQueue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A PriorityQueue maintains a partial ordering of its elements such that the
+ least element can always be found in constant time. Put()'s and pop()'s
+ require log(size) time.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PriorityQueue -->
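+  <!-- A minimal subclass sketch (editorial illustration). As documented above, a
+       subclass supplies lessThan() and its constructor must call initialize().
+       The class is pre-generics, hence the Object casts.
+
+       public class LongMinQueue extends PriorityQueue {
+         public LongMinQueue(int maxSize) {
+           initialize(maxSize); // required in every subclass constructor
+         }
+         protected boolean lessThan(Object a, Object b) {
+           return ((Long) a).longValue() < ((Long) b).longValue();
+         }
+       }
+       // LongMinQueue q = new LongMinQueue(4); q.put(3L); Long least = (Long) q.top();
+  -->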
+ <!-- start class org.apache.hadoop.util.ProgramDriver -->
+ <class name="ProgramDriver" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ProgramDriver"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="mainClass" type="java.lang.Class"/>
+ <param name="description" type="java.lang.String"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+      <![CDATA[This is the method that adds the class to the repository.
+ @param name The name you want the class instance to be called with
+ @param mainClass The class that you want to add to the repository
+ @param description The description of the class
+ @throws NoSuchMethodException
+ @throws SecurityException]]>
+ </doc>
+ </method>
+ <method name="driver"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[This is a driver for the example programs.
+ It looks at the first command line argument and tries to find an
+ example program with that name.
+ If it is found, it calls the main method in that class with the rest
+ of the command line arguments.
+ @param args The arguments from the user. args[0] is the command to run.
+ @throws NoSuchMethodException
+ @throws SecurityException
+ @throws IllegalAccessException
+ @throws IllegalArgumentException
+ @throws Throwable Anything thrown by the example program's main]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[A driver that is used to run programs added to it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ProgramDriver -->
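+  <!-- A minimal sketch of registering and dispatching a program (editorial
+       illustration); WordCount is a hypothetical class with a main method.
+
+       public static void main(String[] args) {
+         try {
+           ProgramDriver pgd = new ProgramDriver();
+           pgd.addClass("wordcount", WordCount.class, "counts the words in the input");
+           pgd.driver(args); // args[0] selects "wordcount"; the rest go to its main
+         } catch (Throwable t) {
+           t.printStackTrace();
+         }
+       }
+  -->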
+ <!-- start class org.apache.hadoop.util.Progress -->
+ <class name="Progress" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Progress"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new root node.]]>
+ </doc>
+ </constructor>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a named node to the tree.]]>
+ </doc>
+ </method>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds a node to the tree.]]>
+ </doc>
+ </method>
+ <method name="startNextPhase"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Called during execution to move to the next phase at this level in the
+ tree.]]>
+ </doc>
+ </method>
+ <method name="phase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current sub-node executing.]]>
+ </doc>
+ </method>
+ <method name="complete"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Completes this node, moving the parent node to its next child.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progress" type="float"/>
+ <doc>
+ <![CDATA[Called during execution on a leaf node to set its progress.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the overall progress of the root.]]>
+ </doc>
+ </method>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Utility to assist with generation of progress reports. Applications build
+ a hierarchy of {@link Progress} instances, each modelling a phase of
+ execution. The root is constructed with {@link #Progress()}. Nodes for
+ sub-phases are created by calling {@link #addPhase()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Progress -->
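+ <!-- Illustrative sketch of the Progress tree described above; the phase
+ names are hypothetical, and equal weighting of phases is assumed:
+
+ Progress root = new Progress();       // root node
+ root.addPhase("copy");                // first sub-phase
+ root.addPhase("sort");                // second sub-phase
+ root.phase().set(0.5f);               // current leaf ("copy") is half done
+ float overall = root.get();           // 0.25f: halfway through 1 of 2 phases
+ root.startNextPhase();                // advance to the "sort" phase
+ -->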
+ <!-- start interface org.apache.hadoop.util.Progressable -->
+ <interface name="Progressable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Report progress to the Hadoop framework.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for reporting progress.
+
+ <p>Clients and/or applications can use the provided <code>Progressable</code>
+ to explicitly report progress to the Hadoop framework. This is especially
+ important for operations which take a significant amount of time, since,
+ in lieu of the reported progress, the framework has to assume that an error
+ has occurred and time out the operation.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Progressable -->
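+ <!-- Illustrative sketch of reporting liveness through a Progressable during
+ a long-running loop; RecordSource and process() are hypothetical:
+
+ void copyAll(RecordSource src, Progressable reporter) throws IOException {
+   while (src.next()) {
+     process(src.current());
+     reporter.progress();   // signal liveness so the framework does not time out
+   }
+ }
+ -->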
+ <!-- start class org.apache.hadoop.util.QuickSort -->
+ <class name="QuickSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="QuickSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Same as {@link #sort}, but indicate that we're making progress after
+ each partition.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of QuickSort.
+ See "Median-of-Three Partitioning" in Sedgewick book.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.QuickSort -->
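+ <!-- Illustrative sketch: sorting an int[] with QuickSort through the
+ IndexedSortable compare/swap callbacks (subtraction is a safe comparator
+ here only because the values are small):
+
+ final int[] a = {5, 3, 9, 1};
+ IndexedSortable sortable = new IndexedSortable() {
+   public int compare(int i, int j) { return a[i] - a[j]; }
+   public void swap(int i, int j) { int t = a[i]; a[i] = a[j]; a[j] = t; }
+ };
+ new QuickSort().sort(sortable, 0, a.length);   // a is now {1, 3, 5, 9}
+ -->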
+ <!-- start class org.apache.hadoop.util.ReflectionUtils -->
+ <class name="ReflectionUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReflectionUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theObject" type="java.lang.Object"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check and set 'configuration' if necessary.
+
+ @param theObject object for which to set configuration
+ @param conf Configuration]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create an object for the given class and initialize it from conf
+
+ @param theClass class of which an object is created
+ @param conf Configuration
+ @return a new object]]>
+ </doc>
+ </method>
+ <method name="setContentionTracing"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="boolean"/>
+ </method>
+ <method name="printThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.PrintWriter"/>
+ <param name="title" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Print information and stack traces for all threads.
+
+ @param stream the stream to write to
+ @param title a string title for the stack trace]]>
+ </doc>
+ </method>
+ <method name="logThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="title" type="java.lang.String"/>
+ <param name="minInterval" type="long"/>
+ <doc>
+ <![CDATA[Log the current thread stacks at INFO level.
+ @param log the logger that logs the stack trace
+ @param title a descriptive title for the call stacks
+ @param minInterval the minimum time since the last time thread info was logged]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[General reflection utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ReflectionUtils -->
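+ <!-- Illustrative sketch of the common newInstance idiom; the property key
+ "my.mapper.class" is hypothetical:
+
+ Configuration conf = new Configuration();
+ Class<?> cls = conf.getClass("my.mapper.class", IdentityMapper.class);
+ Object mapper = ReflectionUtils.newInstance(cls, conf);
+ // newInstance initializes the new object from conf if it is Configurable.
+ -->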
+ <!-- start class org.apache.hadoop.util.RunJar -->
+ <class name="RunJar" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RunJar"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="unJar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jarFile" type="java.io.File"/>
+ <param name="toDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unpack a jar file into a directory.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Run a Hadoop job jar. If the main class is not in the jar's manifest,
+ then it must be provided on the command line.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Run a Hadoop job jar.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.RunJar -->
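+ <!-- Illustrative sketch of RunJar.unJar; the paths are hypothetical:
+
+ File jar = new File("/tmp/myjob.jar");
+ File dest = new File("/tmp/myjob-unpacked");
+ dest.mkdirs();
+ RunJar.unJar(jar, dest);   // unpack the jar's contents into dest
+ -->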
+ <!-- start class org.apache.hadoop.util.ServletUtil -->
+ <class name="ServletUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ServletUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initHTML" return="java.io.PrintWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="response" type="javax.servlet.ServletResponse"/>
+ <param name="title" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the initial HTML header and return a PrintWriter for the response body.]]>
+ </doc>
+ </method>
+ <method name="getParameter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.ServletRequest"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a parameter from a ServletRequest.
+ Return null if the parameter contains only white spaces.]]>
+ </doc>
+ </method>
+ <method name="htmlFooter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[HTML footer to be added in the jsps.
+ @return the HTML footer.]]>
+ </doc>
+ </method>
+ <field name="HTML_TAIL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.util.ServletUtil -->
+ <!-- start class org.apache.hadoop.util.Shell -->
+ <class name="Shell" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param interval the minimum duration to wait before re-executing the
+ command.]]>
+ </doc>
+ </constructor>
+ <method name="getGROUPS_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's groups list]]>
+ </doc>
+ </method>
+ <method name="getGET_PERMISSION_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a Unix command to get permission information.]]>
+ </doc>
+ </method>
+ <method name="getUlimitMemoryCommand" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the Unix command for setting the maximum virtual memory available
+ to a given child process. This is only relevant when we are forking a
+ process from within the {@link org.apache.hadoop.mapred.Mapper} or the
+ {@link org.apache.hadoop.mapred.Reducer} implementations
+ e.g. <a href="{@docRoot}/org/apache/hadoop/mapred/pipes/package-summary.html">Hadoop Pipes</a>
+ or <a href="{@docRoot}/org/apache/hadoop/streaming/package-summary.html">Hadoop Streaming</a>.
+
+ It also checks that we are running on a *nix platform; otherwise
+ (e.g. on Cygwin/Windows) it returns <code>null</code>.
+ @param job job configuration
+ @return a <code>String[]</code> with the ulimit command arguments or
+ <code>null</code> if we are running on a non *nix platform or
+ if the limit is unspecified.]]>
+ </doc>
+ </method>
+ <method name="setEnvironment"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="env" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[Set the environment for the command.
+ @param env Mapping of environment variables]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Set the working directory.
+ @param dir The directory in which the command will be executed]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check to see if the command needs to be executed, and execute it if needed.]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an array containing the command name and its parameters.]]>
+ </doc>
+ </method>
+ <method name="parseExecResult"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the execution result]]>
+ </doc>
+ </method>
+ <method name="getProcess" return="java.lang.Process"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current sub-process executing the given command.
+ @return the process executing the command]]>
+ </doc>
+ </method>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the exit code.
+ @return the exit code of the process]]>
+ </doc>
+ </method>
+ <method name="execCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Static method to execute a shell command.
+ Covers most of the simple cases without requiring the user to implement
+ the <code>Shell</code> interface.
+ @param cmd shell command to execute.
+ @return the output of the executed command.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USER_NAME_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's name]]>
+ </doc>
+ </field>
+ <field name="SET_PERMISSION_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set permission]]>
+ </doc>
+ </field>
+ <field name="SET_OWNER_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set owner]]>
+ </doc>
+ </field>
+ <field name="SET_GROUP_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WINDOWS" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set to true on Windows platforms]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A base class for running a Unix command.
+
+ <code>Shell</code> can be used to run unix commands like <code>du</code> or
+ <code>df</code>. It also offers facilities to gate commands by
+ time-intervals.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell -->
+ <!-- start class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <class name="Shell.ExitCodeException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ExitCodeException" type="int, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is an IOException with exit code added.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <!-- start class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
+ <class name="Shell.ShellCommandExecutor" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File, java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the shell command.]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getOutput" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the output of the shell command.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple shell command executor.
+
+ <code>ShellCommandExecutor</code> should be used in cases where the output
+ of the command needs no explicit parsing and where the command, working
+ directory and environment remain unchanged. The output of the command
+ is stored as-is and is expected to be small.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
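+ <!-- Illustrative sketch of the two styles documented above: the one-shot
+ static helper, and a reusable executor when the exit code is needed:
+
+ // one-shot: only the output matters
+ String uname = Shell.execCommand(new String[] {"uname"});
+
+ // reusable executor: output plus exit code
+ Shell.ShellCommandExecutor exec =
+     new Shell.ShellCommandExecutor(new String[] {"df", "-k", "/tmp"});
+ exec.execute();                  // assumed to throw ExitCodeException on failure
+ String out = exec.getOutput();   // captured stdout, stored as-is
+ int code = exec.getExitCode();
+ -->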
+ <!-- start class org.apache.hadoop.util.StringUtils -->
+ <class name="StringUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StringUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stringifyException" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Make a string representation of the exception.
+ @param e The exception to stringify
+ @return A string with exception name and call stack.]]>
+ </doc>
+ </method>
+ <method name="simpleHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fullHostname" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a full hostname, return the word up to the first dot.
+ @param fullHostname the full hostname
+ @return the hostname to the first dot]]>
+ </doc>
+ </method>
+ <method name="humanReadableInt" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="number" type="long"/>
+ <doc>
+ <![CDATA[Given an integer, return a string in an approximate but
+ human-readable format.
+ It uses the bases 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3.
+ @param number the number to format
+ @return a human readable form of the integer]]>
+ </doc>
+ </method>
+ <method name="formatPercent" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="done" type="double"/>
+ <param name="digits" type="int"/>
+ <doc>
+ <![CDATA[Format a percentage for presentation to the user.
+ @param done the percentage to format (0.0 to 1.0)
+ @param digits the number of digits past the decimal point
+ @return a string representation of the percentage]]>
+ </doc>
+ </method>
+ <method name="arrayToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strs" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Given an array of strings, return a comma-separated list of its elements.
+ @param strs Array of strings
+ @return Empty string if strs.length is 0, comma separated list of strings
+ otherwise]]>
+ </doc>
+ </method>
+ <method name="byteToHexString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Given an array of bytes, convert it to a hex string
+ representation.
+ @param bytes the bytes to convert
+ @return hex string representation of the byte array]]>
+ </doc>
+ </method>
+ <method name="hexStringToByte" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a hex string, return the byte array it encodes.
+ @param hex the hex string
+ @return the byte array corresponding to the given hex string; its
+ size is therefore hex.length/2]]>
+ </doc>
+ </method>
+ <method name="uriToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uris" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[Convert an array of URIs to a string representation.
+ @param uris the URIs to convert]]>
+ </doc>
+ </method>
+ <method name="stringToURI" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convert an array of strings to an array of URIs.
+ @param str the strings to convert]]>
+ </doc>
+ </method>
+ <method name="stringToPath" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convert an array of strings to an array of Paths.
+ @param str the strings to convert]]>
+ </doc>
+ </method>
+ <method name="formatTimeDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+ <![CDATA[Given a finish and start time in milliseconds, return a
+ String in the format Xhrs, Ymins, Z sec for the time difference between the two times.
+ If the finish time comes before the start time, negative values of X, Y and Z will be returned.
+
+ @param finishTime finish time
+ @param startTime start time]]>
+ </doc>
+ </method>
+ <method name="getFormattedTimeWithDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dateFormat" type="java.text.DateFormat"/>
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+ <![CDATA[Formats time in ms and appends the difference (finishTime - startTime)
+ as returned by formatTimeDiff().
+ If the finish time is 0, an empty string is returned; if the start time is 0,
+ the difference is not appended to the return value.
+ @param dateFormat date format to use
+ @param finishTime finish time
+ @param startTime start time
+ @return formatted value.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an array of strings.
+ @param str the comma-separated string values
+ @return the array of comma-separated string values]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Split a string using the default separator
+ @param str a string that may contain escaped separators
+ @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="separator" type="char"/>
+ <doc>
+ <![CDATA[Split a string using the given separator
+ @param str a string that may contain escaped separators
+ @param escapeChar a char that can be used to escape the separator
+ @param separator a separator char
+ @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Escape commas in the string using the default escape char
+ @param str a string
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Escape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the char to be escaped
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Unescape commas in the string using the default escape char
+ @param str a string
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Unescape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the escaped char
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="getHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the hostname without throwing an exception.
+ @return hostname]]>
+ </doc>
+ </method>
+ <method name="startupShutdownMessage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <param name="args" type="java.lang.String[]"/>
+ <param name="LOG" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Print a log message for starting up and shutting down
+ @param clazz the class of the server
+ @param args arguments
+ @param LOG the target log object]]>
+ </doc>
+ </method>
+ <field name="COMMA" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ESCAPE_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[General string utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.StringUtils -->
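+ <!-- Illustrative sketch of the escape/split/unescape round trip documented
+ above, assuming the default comma separator and backslash escape char:
+
+ String escaped = StringUtils.escapeString("a,b");    // a\,b
+ String joined = escaped + "," + "c";                 // a\,b,c
+ String[] parts = StringUtils.split(joined);          // { a\,b , c }
+ String back = StringUtils.unEscapeString(parts[0]);  // a,b
+ -->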
+ <!-- start interface org.apache.hadoop.util.Tool -->
+ <interface name="Tool" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Execute the command with the given arguments.
+
+ @param args command specific arguments.
+ @return exit code.
+ @throws Exception]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A tool interface that supports handling of generic command-line options.
+
+ <p><code>Tool</code> is the standard interface for any Map-Reduce tool/application.
+ The tool/application should delegate the handling of
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ standard command-line options</a> to {@link ToolRunner#run(Tool, String[])}
+ and only handle its custom arguments.</p>
+
+ <p>Here is how a typical <code>Tool</code> is implemented:</p>
+ <p><blockquote><pre>
+ public class MyApp extends Configured implements Tool {
+
+ public int run(String[] args) throws Exception {
+ // <code>Configuration</code> processed by <code>ToolRunner</code>
+ Configuration conf = getConf();
+
+ // Create a JobConf using the processed <code>conf</code>
+ JobConf job = new JobConf(conf, MyApp.class);
+
+ // Process custom command-line options
+ Path in = new Path(args[1]);
+ Path out = new Path(args[2]);
+
+ // Specify various job-specific parameters
+ job.setJobName("my-app");
+ job.setInputPath(in);
+ job.setOutputPath(out);
+ job.setMapperClass(MyApp.MyMapper.class);
+ job.setReducerClass(MyApp.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ return 0;
+ }
+
+ public static void main(String[] args) throws Exception {
+ // Let <code>ToolRunner</code> handle generic command-line options
+ int res = ToolRunner.run(new Configuration(), new MyApp(), args);
+
+ System.exit(res);
+ }
+ }
+ </pre></blockquote></p>
+
+ @see GenericOptionsParser
+ @see ToolRunner]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Tool -->
+ <!-- start class org.apache.hadoop.util.ToolRunner -->
+ <class name="ToolRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ToolRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the given <code>Tool</code> by {@link Tool#run(String[])}, after
+ parsing with the given generic arguments. Uses the given
+ <code>Configuration</code>, or builds one if null.
+
+ Sets the <code>Tool</code>'s configuration with the possibly modified
+ version of the <code>conf</code>.
+
+ @param conf <code>Configuration</code> for the <code>Tool</code>.
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the <code>Tool</code> with its <code>Configuration</code>.
+
+ Equivalent to <code>run(tool.getConf(), tool, args)</code>.
+
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Prints generic command-line arguments and usage information.
+
+ @param out stream to write usage information to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility to help run {@link Tool}s.
+
+ <p><code>ToolRunner</code> can be used to run classes implementing
+ <code>Tool</code> interface. It works in conjunction with
+ {@link GenericOptionsParser} to parse the
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ generic hadoop command line arguments</a> and modifies the
+ <code>Configuration</code> of the <code>Tool</code>. The
+ application-specific options are passed along without being modified.
+ </p>
+
+ @see Tool
+ @see GenericOptionsParser]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ToolRunner -->
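+ <!-- Illustrative sketch: generic options are consumed by ToolRunner before
+ run() is called, so a -D key=value argument shows up in getConf(). MyTool
+ and the property name are hypothetical:
+
+ public class MyTool extends Configured implements Tool {
+   public int run(String[] args) throws Exception {
+     // e.g. invoked as: hadoop MyTool -D my.tool.limit=20 inputDir
+     String limit = getConf().get("my.tool.limit", "10");
+     System.out.println("limit=" + limit + " remaining args=" + args.length);
+     return 0;
+   }
+   public static void main(String[] args) throws Exception {
+     System.exit(ToolRunner.run(new Configuration(), new MyTool(), args));
+   }
+ }
+ -->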
+ <!-- start class org.apache.hadoop.util.VersionInfo -->
+ <class name="VersionInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Hadoop version.
+ @return the Hadoop version string, eg. "0.6.3-dev"]]>
+ </doc>
+ </method>
+ <method name="getRevision" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the subversion revision number for the root directory
+ @return the revision number, eg. "451451"]]>
+ </doc>
+ </method>
+ <method name="getDate" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The date that Hadoop was compiled.
+ @return the compilation date in unix date format]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The user that compiled Hadoop.
+ @return the username of the user]]>
+ </doc>
+ </method>
+ <method name="getUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the subversion URL for the root Hadoop directory.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[This class reports the package information for Hadoop, as recorded by the
+ HadoopVersionAnnotation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.VersionInfo -->
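+ <!-- Illustrative sketch: printing the build information exposed above:
+
+ System.out.println("Hadoop " + VersionInfo.getVersion()
+     + " from revision " + VersionInfo.getRevision()
+     + ", compiled by " + VersionInfo.getUser()
+     + " on " + VersionInfo.getDate());
+ -->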
+ <!-- start class org.apache.hadoop.util.XMLUtils -->
+ <class name="XMLUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="XMLUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="transform"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="styleSheet" type="java.io.InputStream"/>
+ <param name="xml" type="java.io.InputStream"/>
+ <param name="out" type="java.io.Writer"/>
+ <exception name="TransformerConfigurationException" type="javax.xml.transform.TransformerConfigurationException"/>
+ <exception name="TransformerException" type="javax.xml.transform.TransformerException"/>
+ <doc>
+ <![CDATA[Transform input xml given a stylesheet.
+
+ @param styleSheet the style-sheet
+ @param xml input xml data
+ @param out output
+ @throws TransformerConfigurationException
+ @throws TransformerException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[General XML utilities.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.XMLUtils -->
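+ <!-- Illustrative sketch of XMLUtils.transform; the stylesheet resource and
+ input file are hypothetical:
+
+ InputStream style = XMLUtils.class.getResourceAsStream("/report.xsl");
+ InputStream xml = new FileInputStream("report.xml");
+ Writer out = new OutputStreamWriter(System.out);
+ XMLUtils.transform(style, xml, out);   // apply the stylesheet to the xml
+ out.flush();
+ -->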
+ <doc>
+ <![CDATA[Common utilities.]]>
+ </doc>
+</package>
+
+</api>
diff --git a/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.1.xml b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.1.xml
new file mode 100644
index 0000000000..fd844cbed0
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.1.xml
@@ -0,0 +1,44778 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Thu Sep 25 11:22:04 PDT 2008 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="hadoop 0.18.1"
+ jdversion="1.1.0">
+
+<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/cutting/local/jdiff/jdiff.jar:/home/cutting/local/jdiff/xerces.jar -classpath /home/cutting/src/hadoop/release-0.18.1/lib/commons-cli-2.0-SNAPSHOT.jar:/home/cutting/src/hadoop/release-0.18.1/lib/commons-codec-1.3.jar:/home/cutting/src/hadoop/release-0.18.1/lib/commons-httpclient-3.0.1.jar:/home/cutting/src/hadoop/release-0.18.1/lib/commons-logging-1.0.4.jar:/home/cutting/src/hadoop/release-0.18.1/lib/commons-logging-api-1.0.4.jar:/home/cutting/src/hadoop/release-0.18.1/lib/commons-net-1.4.1.jar:/home/cutting/src/hadoop/release-0.18.1/lib/jets3t-0.6.0.jar:/home/cutting/src/hadoop/release-0.18.1/lib/jetty-5.1.4.jar:/home/cutting/src/hadoop/release-0.18.1/lib/jetty-ext/commons-el.jar:/home/cutting/src/hadoop/release-0.18.1/lib/jetty-ext/jasper-compiler.jar:/home/cutting/src/hadoop/release-0.18.1/lib/jetty-ext/jasper-runtime.jar:/home/cutting/src/hadoop/release-0.18.1/lib/jetty-ext/jsp-api.jar:/home/cutting/src/hadoop/release-0.18.1/lib/junit-3.8.1.jar:/home/cutting/src/hadoop/release-0.18.1/lib/kfs-0.1.3.jar:/home/cutting/src/hadoop/release-0.18.1/lib/log4j-1.2.15.jar:/home/cutting/src/hadoop/release-0.18.1/lib/oro-2.0.8.jar:/home/cutting/src/hadoop/release-0.18.1/lib/servlet-api.jar:/home/cutting/src/hadoop/release-0.18.1/lib/slf4j-api-1.4.3.jar:/home/cutting/src/hadoop/release-0.18.1/lib/slf4j-log4j12-1.4.3.jar:/home/cutting/src/hadoop/release-0.18.1/lib/xmlenc-0.52.jar:/home/cutting/src/hadoop/release-0.18.1/conf:/usr/share/ant/lib/ant-launcher.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/xercesImpl.jar:/usr/share/ant/lib/ant-javamail.jar:/usr/share/ant/lib/ant-apache-resolver.jar:/usr/share/ant/lib/ant-jdepend.jar:/usr/share/ant/lib/ant-apache-regexp.jar:/usr/share/ant/lib/ant-apache-log4j.jar:/usr/share/ant/lib/ant-apache-oro.jar:/usr/share/ant/lib/junit.jar:/usr/share/ant/lib/ant.jar:/usr/share/ant/lib/ant-jmf.jar:/usr/share/ant/lib/ant-commons-logging.jar:/usr/share/ant/lib/catalina5.5-ant.jar:/usr/share/ant/lib/ant-commons-net.jar:/usr/share/ant/lib/ant-bootstrap.jar:/usr/share/ant/lib/tomcat5.5-jkstatus-ant.jar:/usr/share/ant/lib/ant-nodeps.jar:/usr/share/ant/lib/ant-jsch.jar:/usr/share/ant/lib/jsch.jar:/usr/share/ant/lib/catalina5.5-ant-jmx.jar:/usr/share/ant/lib/ant-swing.jar:/usr/share/ant/lib/ant-antlr.jar:/usr/share/ant/lib/ant-apache-bsf.jar:/usr/share/ant/lib/bcel.jar:/usr/share/ant/lib/ant-trax.jar:/usr/share/ant/lib/junit-3.8.1.jar:/usr/share/ant/lib/ant-apache-bcel.jar:/usr/share/ant/lib/ant-junit.jar:/usr/lib/jvm/java-6-sun-1.6.0.06/lib/tools.jar -sourcepath /home/cutting/src/hadoop/release-0.18.1/src/core:/home/cutting/src/hadoop/release-0.18.1/src/hdfs:/home/cutting/src/hadoop/release-0.18.1/src/mapred:/home/cutting/src/hadoop/release-0.18.1/src/tools -apidir /home/cutting/src/hadoop/release-0.18.1/build -apiname hadoop 0.18.1 -->
+<package name="org.apache.hadoop">
+ <!-- start class org.apache.hadoop.HadoopVersionAnnotation -->
+ <class name="HadoopVersionAnnotation" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.annotation.Annotation"/>
+ <doc>
+ <![CDATA[A package attribute that captures the version of Hadoop that was compiled.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.HadoopVersionAnnotation -->
+</package>
+<package name="org.apache.hadoop.conf">
+ <!-- start interface org.apache.hadoop.conf.Configurable -->
+ <interface name="Configurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration to be used by this object.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the configuration used by this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.conf.Configurable -->
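+ <!-- Illustrative sketch of the usual Configurable pattern; frameworks
+ (e.g. ReflectionUtils.newInstance) call setConf right after construction.
+ MyCodec is a hypothetical class name:
+
+ public class MyCodec implements Configurable {
+   private Configuration conf;
+   public void setConf(Configuration conf) { this.conf = conf; }
+   public Configuration getConf() { return conf; }
+ }
+ -->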
+ <!-- start class org.apache.hadoop.conf.Configuration -->
+ <class name="Configuration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"/>
+ <constructor name="Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration with the same settings cloned from another.
+
+ @param other the configuration from which to clone settings.]]>
+ </doc>
+ </constructor>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param name resource to be added, the classpath is examined for a file
+ with that name.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="url" type="java.net.URL"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param url url of the resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param file file-path of resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, <code>null</code> if
+ no such property exists.
+
+ Values are processed for <a href="#VariableExpansion">variable expansion</a>
+ before being returned.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="getRaw" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, without doing
+ <a href="#VariableExpansion">variable expansion</a>.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the <code>value</code> of the <code>name</code> property.
+
+ @param name property name.
+ @param value property value.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property. If no such property
+ exists, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value, or <code>defaultValue</code> if the property
+ doesn't exist.]]>
+ </doc>
+ </method>
+ <method name="getInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="int"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>.
+
+ If no such property exists, or if the specified value is not a valid
+ <code>int</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as an <code>int</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>.
+
+ @param name property name.
+ @param value <code>int</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="long"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>long</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>long</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>.
+
+ @param name property name.
+ @param value <code>long</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="float"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>float</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>float</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getBoolean" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="boolean"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>boolean</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>boolean</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setBoolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>.
+
+ @param name property name.
+ @param value <code>boolean</code> value of the property.]]>
+ </doc>
+ </method>
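+ <!-- Illustrative sketch of the accessors documented above; the property
+ names are hypothetical:
+
+ Configuration conf = new Configuration();
+ conf.set("my.app.name", "demo");
+ String name = conf.get("my.app.name");                      // "demo"
+ String other = conf.get("my.app.other", "none");            // default: "none"
+ conf.setInt("my.app.retries", 3);
+ int retries = conf.getInt("my.app.retries", 1);             // 3
+ boolean verbose = conf.getBoolean("my.app.verbose", false); // false
+ -->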
+ <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Parse the given attribute as a set of integer ranges
+ @param name the attribute name
+ @param defaultValue the default value if it is not set
+ @return a new set of ranges from the configured value]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ a collection of <code>String</code>s.
+ If no such property is specified, an empty collection is returned.
+ <p>
+ This is an optimized version of {@link #getStrings(String)}
+
+ @param name property name.
+ @return property value as a collection of <code>String</code>s.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then <code>null</code> is returned.
+
+ @param name property name.
+ @return property value as an array of <code>String</code>s,
+ or <code>null</code>.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified, the default value is returned.
+
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of <code>String</code>s,
+ or default value.]]>
+ </doc>
+ </method>
+ <method name="setStrings"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="values" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Set the array of string values for the <code>name</code> property as
+ comma delimited values.
+
+ @param name property name.
+ @param values The values]]>
+ </doc>
+ </method>
+ <method name="getClassByName" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Load a class by name.
+
+ @param name the class name.
+ @return the class object.
+ @throws ClassNotFoundException if the class is not found.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;? extends U&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends U&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;U&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>
+ implementing the interface specified by <code>xface</code>.
+
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ An exception is thrown if the returned class does not implement the named
+ interface.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @param xface the interface implemented by the named class.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+        <![CDATA[Set the value of the <code>name</code> property to the name of
+ <code>theClass</code> implementing the given interface <code>xface</code>.
+
+ An exception is thrown if <code>theClass</code> does not implement the
+ interface <code>xface</code>.
+
+ @param name property name.
+ @param theClass property value.
+ @param xface the interface implemented by the named class.]]>
+ </doc>
+ </method>
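+      <!-- Usage sketch (illustrative): getClass/setClass with an interface check.
+           The key "example.task" is invented; Thread/Runnable are plain JDK types.
+
+           import org.apache.hadoop.conf.Configuration;
+
+           public class ClassExample {
+             public static void main(String[] args) throws Exception {
+               Configuration conf = new Configuration();
+               conf.setClass("example.task", Thread.class, Runnable.class);
+               Class<? extends Runnable> cls =
+                   conf.getClass("example.task", Thread.class, Runnable.class);
+               Runnable task = cls.newInstance();   // instantiate the configured class
+               System.out.println(task.getClass().getName());
+             }
+           }
+      -->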
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
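+      <!-- Usage sketch (illustrative): locating a file under one of several local
+           directories via getLocalPath/getFile. The dirs property
+           "example.local.dirs" and the paths are invented.
+
+           import java.io.File;
+           import java.io.IOException;
+           import org.apache.hadoop.conf.Configuration;
+           import org.apache.hadoop.fs.Path;
+
+           public class LocalPathExample {
+             public static void main(String[] args) throws IOException {
+               Configuration conf = new Configuration();
+               conf.setStrings("example.local.dirs", new String[] {"/tmp/d1", "/tmp/d2"});
+               // One directory is chosen by the hash of the relative path and
+               // created if it does not exist yet:
+               Path p = conf.getLocalPath("example.local.dirs", "job_0001/part-0");
+               File f = conf.getFile("example.local.dirs", "job_0001/part-0");
+               System.out.println(p + " / " + f);
+             }
+           }
+      -->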
+ <method name="getResource" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the {@link URL} for the named resource.
+
+ @param name resource name.
+ @return the url for the named resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get an input stream attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return an input stream attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsReader" return="java.io.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a {@link Reader} attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return a reader attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code>
+ key-value pairs in the configuration.
+
+ @return an iterator over the entries.]]>
+ </doc>
+ </method>
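+      <!-- Usage sketch (illustrative): walking all key-value pairs with iterator().
+
+           import java.util.Iterator;
+           import java.util.Map;
+           import org.apache.hadoop.conf.Configuration;
+
+           public class IterateExample {
+             public static void main(String[] args) {
+               Configuration conf = new Configuration();
+               Iterator<Map.Entry<String, String>> it = conf.iterator();
+               while (it.hasNext()) {
+                 Map.Entry<String, String> e = it.next();
+                 System.out.println(e.getKey() + "=" + e.getValue());
+               }
+             }
+           }
+      -->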
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Write out the non-default properties in this configuration to the given
+ {@link OutputStream}.
+
+ @param out the output stream to write to.]]>
+ </doc>
+ </method>
+ <method name="getClassLoader" return="java.lang.ClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link ClassLoader} for this job.
+
+ @return the correct class loader.]]>
+ </doc>
+ </method>
+ <method name="setClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="classLoader" type="java.lang.ClassLoader"/>
+ <doc>
+ <![CDATA[Set the class loader that will be used to load the various objects.
+
+ @param classLoader the new class loader.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setQuietMode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="quietmode" type="boolean"/>
+ <doc>
+        <![CDATA[Set the quietness-mode.
+
+ In quiet mode, error and informational messages might not be logged.
+
+ @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
+ to turn it off.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[For debugging. List non-default properties to the terminal and exit.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides access to configuration parameters.
+
+ <h4 id="Resources">Resources</h4>
+
+ <p>Configurations are specified by resources. A resource contains a set of
+ name/value pairs as XML data. Each resource is named by either a
+ <code>String</code> or by a {@link Path}. If named by a <code>String</code>,
+ then the classpath is examined for a file with that name. If named by a
+ <code>Path</code>, then the local filesystem is examined directly, without
+ referring to the classpath.
+
+ <p>Hadoop by default specifies two resources, loaded in-order from the
+ classpath: <ol>
+ <li><tt><a href="{@docRoot}/../hadoop-default.html">hadoop-default.xml</a>
+ </tt>: Read-only defaults for hadoop.</li>
+ <li><tt>hadoop-site.xml</tt>: Site-specific configuration for a given hadoop
+ installation.</li>
+ </ol>
+ Applications may add additional resources, which are loaded
+ subsequent to these resources in the order they are added.
+
+ <h4 id="FinalParams">Final Parameters</h4>
+
+ <p>Configuration parameters may be declared <i>final</i>.
+ Once a resource declares a value final, no subsequently-loaded
+ resource can alter that value.
+ For example, one might define a final parameter with:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;dfs.client.buffer.dir&lt;/name&gt;
+ &lt;value&gt;/tmp/hadoop/dfs/client&lt;/value&gt;
+ <b>&lt;final&gt;true&lt;/final&gt;</b>
+ &lt;/property&gt;</pre></tt>
+
+ Administrators typically define parameters as final in
+ <tt>hadoop-site.xml</tt> for values that user applications may not alter.
+
+ <h4 id="VariableExpansion">Variable Expansion</h4>
+
+ <p>Value strings are first processed for <i>variable expansion</i>. The
+ available properties are:<ol>
+ <li>Other properties defined in this Configuration; and, if a name is
+ undefined here,</li>
+ <li>Properties in {@link System#getProperties()}.</li>
+ </ol>
+
+ <p>For example, if a configuration resource contains the following property
+ definitions:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;basedir&lt;/name&gt;
+ &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
+ &lt;/property&gt;
+
+ &lt;property&gt;
+ &lt;name&gt;tempdir&lt;/name&gt;
+ &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt;
+ &lt;/property&gt;</pre></tt>
+
+ When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
+ will be resolved to another property in this Configuration, while
+ <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
+ of the System property with that name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration -->
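+  <!-- Usage sketch (illustrative): the variable expansion described above, using
+       the invented keys basedir/tempdir from the example.
+
+       import org.apache.hadoop.conf.Configuration;
+
+       Configuration conf = new Configuration();
+       conf.set("basedir", "/user/${user.name}");
+       conf.set("tempdir", "${basedir}/tmp");
+       // With the System property user.name=alice this prints /user/alice/tmp:
+       System.out.println(conf.get("tempdir"));
+  -->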
+ <!-- start class org.apache.hadoop.conf.Configuration.IntegerRanges -->
+ <class name="Configuration.IntegerRanges" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Configuration.IntegerRanges"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Configuration.IntegerRanges" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isIncluded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+        <![CDATA[Is the given value in the set of ranges?
+ @param value the value to check
+ @return is the value in the ranges?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A class that represents a set of positive integer ranges. It parses
+ strings of the form: "2-3,5,7-" where ranges are separated by comma and
+ the lower/upper bounds are separated by dash. Either the lower or upper
+ bound may be omitted, meaning all values up to or over. So the string
+ above means 2, 3, 5, and 7, 8, 9, ...]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration.IntegerRanges -->
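+  <!-- Usage sketch (illustrative): parsing the range string from the example above.
+
+       import org.apache.hadoop.conf.Configuration;
+
+       Configuration.IntegerRanges ranges = new Configuration.IntegerRanges("2-3,5,7-");
+       ranges.isIncluded(3);    // true
+       ranges.isIncluded(4);    // false
+       ranges.isIncluded(100);  // true: "7-" is open-ended upward
+  -->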
+ <!-- start class org.apache.hadoop.conf.Configured -->
+ <class name="Configured" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Configured"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configured" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configured -->
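+  <!-- Usage sketch (illustrative): a minimal subclass of Configured. The class
+       name is invented; io.file.buffer.size is a standard Hadoop property.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.conf.Configured;
+
+       public class ExampleConfigured extends Configured {
+         public ExampleConfigured(Configuration conf) {
+           super(conf);               // stores conf; later available via getConf()
+         }
+         public int bufferSize() {
+           return getConf().getInt("io.file.buffer.size", 4096);
+         }
+       }
+  -->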
+</package>
+<package name="org.apache.hadoop.dfs">
+ <!-- start class org.apache.hadoop.dfs.AlreadyBeingCreatedException -->
+ <class name="AlreadyBeingCreatedException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AlreadyBeingCreatedException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+        <![CDATA[The exception raised when you ask to create a file that is
+ already being created but is not yet closed.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.AlreadyBeingCreatedException -->
+ <!-- start class org.apache.hadoop.dfs.Balancer -->
+ <class name="Balancer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+        <![CDATA[Run a balancer.
+ @param args command line arguments]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+        <![CDATA[Main method of the Balancer.
+ @param args arguments to the Balancer
+ @exception Exception if any exception occurs during datanode balancing]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Return this balancer's configuration.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+        <![CDATA[Set this balancer's configuration.]]>
+ </doc>
+ </method>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ALREADY_RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NO_MOVE_BLOCK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NO_MOVE_PROGRESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IO_EXCEPTION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ILLEGAL_ARGS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>The balancer is a tool that balances disk space usage on an HDFS cluster
+ when some datanodes become full or when new empty nodes join the cluster.
+ The tool is deployed as an application program that can be run by the
+ cluster administrator on a live HDFS cluster while applications are
+ adding and deleting files.
+
+ <p>SYNOPSIS
+ <pre>
+ To start:
+ bin/start-balancer.sh [-threshold <threshold>]
+ Example: bin/start-balancer.sh
+ start the balancer with a default threshold of 10%
+ bin/start-balancer.sh -threshold 5
+ start the balancer with a threshold of 5%
+ To stop:
+ bin/stop-balancer.sh
+ </pre>
+
+ <p>DESCRIPTION
+ <p>The threshold parameter is a fraction in the range of (0%, 100%) with a
+ default value of 10%. The threshold sets a target for whether the cluster
+ is balanced. A cluster is balanced if, for each datanode, the utilization
+ of the node (ratio of used space at the node to total capacity of the node)
+ differs from the utilization of the cluster (ratio of used space in the cluster
+ to total capacity of the cluster) by no more than the threshold value.
+ The smaller the threshold, the more balanced a cluster will become.
+ It takes more time to run the balancer for small threshold values.
+ Also for a very small threshold the cluster may not be able to reach the
+ balanced state when applications write and delete files concurrently.
+
+ <p>The tool moves blocks from highly utilized datanodes to poorly
+ utilized datanodes iteratively. In each iteration a datanode moves or
+ receives no more than the lesser of 10G bytes or the threshold fraction
+ of its capacity. Each iteration runs no more than 20 minutes.
+ At the end of each iteration, the balancer obtains updated datanodes
+ information from the namenode.
+
+ <p>A system property that limits the balancer's use of bandwidth is
+ defined in the default configuration file:
+ <pre>
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>1048576</value>
+ <description> Specifies the maximum bandwidth that each datanode
+ can utilize for the balancing purpose in terms of the number of bytes
+ per second. </description>
+ </property>
+ </pre>
+
+ <p>This property determines the maximum speed at which a block will be
+ moved from one datanode to another. The default value is 1MB/s. The higher
+ the bandwidth, the faster a cluster can reach the balanced state,
+ but with greater competition with application processes. If an
+ administrator changes the value of this property in the configuration
+ file, the change is observed when HDFS is next restarted.
+
+ <p>MONITORING BALANCER PROGRESS
+ <p>After the balancer is started, the name of an output file where the
+ balancer's progress will be recorded is printed on the screen. The administrator
+ can monitor the running of the balancer by reading the output file.
+ The output shows the balancer's status iteration by iteration. In each
+ iteration it prints the starting time, the iteration number, the total
+ number of bytes that have been moved in the previous iterations,
+ the total number of bytes that are left to move in order for the cluster
+ to be balanced, and the number of bytes that are being moved in this
+ iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left
+ To Move" is decreasing.
+
+ <p>Running multiple instances of the balancer in an HDFS cluster is
+ prohibited by the tool.
+
+ <p>The balancer automatically exits when any of the following five
+ conditions is satisfied:
+ <ol>
+ <li>The cluster is balanced;
+ <li>No block can be moved;
+ <li>No block has been moved for five consecutive iterations;
+ <li>An IOException occurs while communicating with the namenode;
+ <li>Another balancer is running.
+ </ol>
+
+ <p>Upon exit, a balancer returns an exit code and prints one of the
+ following messages to the output file, corresponding to the above exit
+ reasons:
+ <ol>
+ <li>The cluster is balanced. Exiting...
+ <li>No block can be moved. Exiting...
+ <li>No block has been moved for five iterations. Exiting...
+ <li>Received an IO exception: failure reason. Exiting...
+ <li>Another balancer is running. Exiting...
+ </ol>
+
+ <p>The administrator can interrupt the execution of the balancer at any
+ time by running the command "stop-balancer.sh" on the machine where the
+ balancer is running.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.Balancer -->
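+  <!-- Sketch (illustrative): mapping the documented exit-code constants to the
+       exit messages listed above. The pairing follows the field names; treat it
+       as an assumption rather than a documented contract.
+
+       import org.apache.hadoop.dfs.Balancer;
+
+       static String describe(int exitCode) {
+         if (exitCode == Balancer.SUCCESS)          return "The cluster is balanced.";
+         if (exitCode == Balancer.NO_MOVE_BLOCK)    return "No block can be moved.";
+         if (exitCode == Balancer.NO_MOVE_PROGRESS) return "No block has been moved for five iterations.";
+         if (exitCode == Balancer.IO_EXCEPTION)     return "Received an IO exception.";
+         if (exitCode == Balancer.ALREADY_RUNNING)  return "Another balancer is running.";
+         if (exitCode == Balancer.ILLEGAL_ARGS)     return "Illegal arguments.";
+         return "Unknown exit code: " + exitCode;
+       }
+  -->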
+ <!-- start class org.apache.hadoop.dfs.ChecksumDistributedFileSystem -->
+ <class name="ChecksumDistributedFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumDistributedFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ChecksumDistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </constructor>
+ <method name="getRawCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Return the total raw capacity of the filesystem, disregarding
+ replication.]]>
+ </doc>
+ </method>
+ <method name="getRawUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Return the total raw used space in the filesystem, disregarding
+ replication.]]>
+ </doc>
+ </method>
+ <method name="getDataNodeStats" return="org.apache.hadoop.dfs.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return statistics for each datanode.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+
+ @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalize previously upgraded files system state.]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.dfs.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[We need to find the blocks that didn't match. Likely only one
+ is corrupt but we will report both to the namenode. In the future,
+ we can consider figuring out exactly which block is corrupt.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the stat information about the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of ChecksumFileSystem over DistributedFileSystem.
+ Note that as of now (May 07), DistributedFileSystem natively checksums
+ all of its data. Using this class is not necessary in most cases.
+ Currently provided mainly for backward compatibility and testing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.ChecksumDistributedFileSystem -->
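+  <!-- Usage sketch (illustrative): cluster stats through ChecksumDistributedFileSystem.
+       The URI is invented, and initialize() is the inherited FileSystem setup call,
+       assumed from the surrounding API.
+
+       import java.net.URI;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.dfs.ChecksumDistributedFileSystem;
+       import org.apache.hadoop.dfs.DatanodeInfo;
+
+       ChecksumDistributedFileSystem fs = new ChecksumDistributedFileSystem();
+       fs.initialize(URI.create("hdfs://namenode:8020/"), new Configuration());
+       System.out.println("raw capacity: " + fs.getRawCapacity());
+       System.out.println("raw used:     " + fs.getRawUsed());
+       for (DatanodeInfo node : fs.getDataNodeStats()) {
+         System.out.println(node.getDatanodeReport());
+       }
+  -->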
+ <!-- start class org.apache.hadoop.dfs.DataBlockScanner -->
+ <class name="DataBlockScanner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DataBlockScanner -->
+ <!-- start class org.apache.hadoop.dfs.DataBlockScanner.Servlet -->
+ <class name="DataBlockScanner.Servlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataBlockScanner.Servlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DataBlockScanner.Servlet -->
+ <!-- start class org.apache.hadoop.dfs.DataChecksum -->
+ <class name="DataChecksum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.zip.Checksum"/>
+ <method name="newDataChecksum" return="org.apache.hadoop.dfs.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="int"/>
+ <param name="bytesPerChecksum" type="int"/>
+ </method>
+ <method name="newDataChecksum" return="org.apache.hadoop.dfs.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <doc>
+        <![CDATA[Creates a DataChecksum from the HEADER_LEN bytes at bytes[offset].
+ @return DataChecksum of the type in the array or null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="newDataChecksum" return="org.apache.hadoop.dfs.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[This constructs a DataChecksum by reading HEADER_LEN bytes from the
+ input stream <i>in</i>.]]>
+ </doc>
+ </method>
+ <method name="writeHeader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the checksum header to the output stream <i>out</i>.]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="writeValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="reset" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the current checksum to the stream.
+ If <i>reset</i> is true, then resets the checksum.
+ @return number of bytes written. Will be equal to getChecksumSize().]]>
+ </doc>
+ </method>
+ <method name="writeValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="reset" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the current checksum to a buffer.
+ If <i>reset</i> is true, then resets the checksum.
+ @return number of bytes written. Will be equal to getChecksumSize().]]>
+ </doc>
+ </method>
+ <method name="compare" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <doc>
+ <![CDATA[Compares the checksum located at buf[offset] with the current checksum.
+ @return true if the checksum matches and false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getChecksumType" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getChecksumSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesPerChecksum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumBytesInSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getChecksumHeaderSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getValue" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ </method>
+ <field name="HEADER_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_NULL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_CRC32" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+        <![CDATA[This class provides an interface and utilities for processing checksums for
+ DFS data transfers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DataChecksum -->
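+  <!-- Usage sketch (illustrative): a CRC32 checksum round trip with the methods
+       documented above (512 bytes per checksum is an arbitrary choice).
+
+       import org.apache.hadoop.dfs.DataChecksum;
+
+       DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);
+       byte[] data = new byte[512];           // all zeroes, just for the demo
+       sum.update(data, 0, data.length);
+       byte[] buf = new byte[sum.getChecksumSize()];
+       sum.writeValue(buf, 0, false);         // serialize without resetting; may throw IOException
+       boolean ok = sum.compare(buf, 0);      // true: buffer holds the current value
+  -->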
+ <!-- start class org.apache.hadoop.dfs.DataNode -->
+ <class name="DataNode" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.InterDatanodeProtocol"/>
+ <implements name="org.apache.hadoop.dfs.ClientDatanodeProtocol"/>
+ <implements name="org.apache.hadoop.dfs.FSConstants"/>
+ <implements name="java.lang.Runnable"/>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use {@link NetUtils#createSocketAddr(String)} instead.]]>
+ </doc>
+ </method>
+ <method name="getDataNode" return="org.apache.hadoop.dfs.DataNode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the DataNode object]]>
+ </doc>
+ </method>
+ <method name="getNameNodeAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSelfAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNamenode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the namenode's identifier]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down this instance of the datanode.
+ Returns only after shutdown is complete.]]>
+ </doc>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Main loop for the DataNode. Runs until shutdown,
+ forever calling remote NameNode functions.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[No matter what kind of exception we get, keep retrying offerService().
+ That's the loop that connects to the NameNode and provides basic DataNode
+ functionality.
+
+ Only stop when "shouldRun" is turned off (which can only happen at shutdown).]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="scheduleBlockReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delay" type="long"/>
+ <doc>
+        <![CDATA[This method arranges for the data node to send the block report at the next heartbeat.]]>
+ </doc>
+ </method>
+ <method name="getFSDataset" return="org.apache.hadoop.dfs.FSDatasetInterface"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method is used for testing.
+ Examples are adding and deleting blocks directly.
+ The most common usage will be when the data node's storage is simulated.
+
+ @return the fsdataset that stores the blocks]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="getBlockMetaDataInfo" return="org.apache.hadoop.dfs.BlockMetaDataInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="updateBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldblock" type="org.apache.hadoop.dfs.Block"/>
+ <param name="newblock" type="org.apache.hadoop.dfs.Block"/>
+ <param name="finalize" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="recoverBlock" return="org.apache.hadoop.dfs.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.dfs.Block"/>
+ <param name="targets" type="org.apache.hadoop.dfs.DatanodeInfo[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DataNode is a class (and program) that stores a set of
+ blocks for a DFS deployment. A single deployment can
+ have one or many DataNodes. Each DataNode communicates
+ regularly with a single NameNode. It also communicates
+ with client code and other DataNodes from time to time.
+
+ DataNodes store a series of named blocks. The DataNode
+ allows client code to read these blocks, or to write new
+ block data. The DataNode may also, in response to instructions
+ from its NameNode, delete blocks or copy blocks to/from other
+ DataNodes.
+
+ The DataNode maintains just one critical table:
+ block-> stream of bytes (of BLOCK_SIZE or less)
+
+ This info is stored on a local disk. The DataNode
+ reports the table's contents to the NameNode upon startup
+ and every so often afterwards.
+
+ DataNodes spend their lives in an endless loop of asking
+ the NameNode for something to do. A NameNode cannot connect
+ to a DataNode directly; a NameNode simply returns values from
+ functions invoked by a DataNode.
+
+ DataNodes maintain an open server socket so that client code
+ or other DataNodes can read/write data. The host/port for
+ this server is reported to the NameNode, which then sends that
+ information to clients or other DataNodes that might be interested.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DataNode -->
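+  <!-- Usage sketch (illustrative): the static accessor and reporting hook documented
+       above. Only meaningful inside a JVM that is already running a datanode.
+
+       import org.apache.hadoop.dfs.DataNode;
+
+       DataNode dn = DataNode.getDataNode();
+       DataNode dnSame = dn;                    // the accessor returns the running instance
+       System.out.println(dn.getNamenode());    // the namenode's identifier
+       System.out.println(dn.getSelfAddr());    // this datanode's host/port
+       dn.scheduleBlockReport(0L);              // block report at the next heartbeat
+  -->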
+ <!-- start class org.apache.hadoop.dfs.DatanodeDescriptor -->
+ <class name="DatanodeDescriptor" extends="org.apache.hadoop.dfs.DatanodeInfo"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DatanodeDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+ @param nodeID id of the data node]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network
+ @param hostName the host name; it may differ from the host specified for the DatanodeID]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, long, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param capacity capacity of the data node
+ @param dfsUsed space used by the data node
+ @param remaining remaining capacity of the data node
+ @param xceiverCount # of data transfers at the data node]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, java.lang.String, java.lang.String, long, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeDescriptor constructor
+
+ @param nodeID id of the data node
+ @param networkLocation location of the data node in network
+ @param capacity capacity of the data node, including space used by non-dfs
+ @param dfsUsed the space used by the dfs datanode
+ @param remaining remaining capacity of the data node
+ @param xceiverCount # of data transfers at the data node]]>
+ </doc>
+ </constructor>
+ <method name="getBlocksScheduled" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Approximate number of blocks currently scheduled to be written
+ to this datanode.]]>
+ </doc>
+ </method>
+ <field name="isAlive" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeDescriptor tracks stats on a given DataNode,
+ such as available storage capacity, last update time, etc.,
+ and maintains a set of blocks stored on the datanode.
+
+ This data structure is internal
+ to the namenode. It is *not* sent over-the-wire to the Client
+ or the Datanodes. Neither is it stored persistently in the
+ fsImage.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DatanodeDescriptor -->
+ <!-- start class org.apache.hadoop.dfs.DatanodeID -->
+ <class name="DatanodeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.dfs.DatanodeID&gt;"/>
+ <constructor name="DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Equivalent to DatanodeID("").]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Equivalent to DatanodeID(nodeName, "", -1, -1).]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="org.apache.hadoop.dfs.DatanodeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DatanodeID copy constructor
+
+ @param from the DatanodeID to copy]]>
+ </doc>
+ </constructor>
+ <constructor name="DatanodeID" type="java.lang.String, java.lang.String, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create DatanodeID
+ @param nodeName (hostname:portNumber)
+ @param storageID data storage ID
+ @param infoPort info server port
+ @param ipcPort ipc server port]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return hostname:portNumber.]]>
+ </doc>
+ </method>
+ <method name="getStorageID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return data storage ID.]]>
+ </doc>
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[@return infoPort (the port to which the HTTP server is bound)]]>
+ </doc>
+ </method>
+ <method name="getIpcPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[@return ipcPort (the port to which the IPC server is bound)]]>
+ </doc>
+ </method>
+ <method name="getHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[@return the hostname only, without the :portNumber.]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="to" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.dfs.DatanodeID"/>
+ <doc>
+ <![CDATA[Comparable.
+ The basis of comparison is the String name (host:portNumber) only.
+ @param that the DatanodeID to compare against
+ @return as specified by Comparable.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="storageID" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="infoPort" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ipcPort" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeID is composed of the data node
+ name (hostname:portNumber) and the data storage ID
+ that it currently represents.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DatanodeID -->
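+  <!-- Usage sketch (illustrative): constructing and comparing DatanodeIDs. All
+       host names, ports, and the storage ID are invented.
+
+       import org.apache.hadoop.dfs.DatanodeID;
+
+       DatanodeID a = new DatanodeID("host1:50010", "storage-17", 50075, 50020);
+       DatanodeID b = new DatanodeID(a);       // copy constructor
+       System.out.println(a.getName());        // host1:50010
+       System.out.println(a.getHost());        // host1
+       System.out.println(a.compareTo(b));     // 0: comparison uses the name only
+  -->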
+ <!-- start class org.apache.hadoop.dfs.DatanodeInfo -->
+ <class name="DatanodeInfo" extends="org.apache.hadoop.dfs.DatanodeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw capacity.]]>
+ </doc>
+ </method>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The used space by the data node.]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw free space.]]>
+ </doc>
+ </method>
+ <method name="getLastUpdate" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The time when this information was accurate.]]>
+ </doc>
+ </method>
+ <method name="getXceiverCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[number of active connections]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[rack name]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the rack name]]>
+ </doc>
+ </method>
+ <method name="getHostName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setHostName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ </method>
+ <method name="getDatanodeReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A formatted string for reporting the status of the DataNode.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1.]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="capacity" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dfsUsed" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="remaining" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="lastUpdate" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="xceiverCount" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="location" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="hostName" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[HostName as supplied by the datanode during registration as its
+ name. Namenode uses datanode IP address as the name.]]>
+ </doc>
+ </field>
+ <field name="adminState" type="org.apache.hadoop.dfs.DatanodeInfo.AdminStates"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[DatanodeInfo represents the status of a DataNode.
+ This object is used for communication in the
+ Datanode Protocol and the Client Protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DatanodeInfo -->
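+  <!-- Usage sketch (illustrative): computing a utilization figure for a DatanodeInfo,
+       e.g. one element of the getDataNodeStats() array shown earlier; the variable
+       `node` is assumed to hold such an element.
+
+       import org.apache.hadoop.dfs.DatanodeInfo;
+
+       double used = (double) node.getDfsUsed() / node.getCapacity();
+       System.out.println(node.getHostName() + " @ " + node.getNetworkLocation()
+           + ": " + (used * 100) + "% used, " + node.getRemaining() + " bytes free");
+  -->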
+ <!-- start class org.apache.hadoop.dfs.DatanodeInfo.AdminStates -->
+ <class name="DatanodeInfo.AdminStates" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.DatanodeInfo.AdminStates&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.DatanodeInfo.AdminStates[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.DatanodeInfo.AdminStates"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DatanodeInfo.AdminStates -->
+ <!-- start class org.apache.hadoop.dfs.DFSAdmin -->
+ <class name="DFSAdmin" extends="org.apache.hadoop.fs.FsShell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DFSAdmin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a DFSAdmin object.]]>
+ </doc>
+ </constructor>
+ <constructor name="DFSAdmin" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a DFSAdmin object.]]>
+ </doc>
+ </constructor>
+ <method name="report"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gives a report on how the FileSystem is doing.
+ @exception IOException if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Safe mode maintenance command.
+ Usage: java DFSAdmin -safemode [enter | leave | get]
+ @param argv List of command line parameters.
+ @param idx The index of the command that is being processed.
+ @exception IOException if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <method name="refreshNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to reread the hosts and excluded hosts
+ file.
+ Usage: java DFSAdmin -refreshNodes
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to ask the namenode to finalize a previously performed upgrade.
+ Usage: java DFSAdmin -finalizeUpgrade
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="upgradeProgress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Command to request current distributed upgrade status,
+ a detailed status, or to force the upgrade to proceed.
+
+ Usage: java DFSAdmin -upgradeProgress [status | details | force]
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="metaSave" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="idx" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps DFS data structures into the specified file.
+ Usage: java DFSAdmin -metasave filename
+ @param argv List of command line parameters.
+ @param idx The index of the command that is being processed.
+ @exception IOException if an error occurred while accessing
+ the file or path.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@param argv The parameters passed to this program.
+ @exception Exception if the filesystem does not exist.
+ @return 0 on success, non-zero on error.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() is the entry point for the DFSAdmin tool.
+ @param argv Command line parameters.
+ @exception Exception if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class provides some DFS administrative access.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DFSAdmin -->
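+ <!-- Example: DFSAdmin extends FsShell and is normally invoked from the
+ command line; a minimal programmatic sketch, assuming the cluster
+ configuration is on the classpath (the arguments mirror the Usage
+ strings documented above):
+
+ DFSAdmin admin = new DFSAdmin(new Configuration());
+ int rc = admin.run(new String[] { "-safemode", "get" });  // 0 on success
+ -->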
+ <!-- start class org.apache.hadoop.dfs.DFSck -->
+ <class name="DFSck" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="DFSck" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Filesystem checker.
+ @param conf current Configuration
+ @throws Exception]]>
+ </doc>
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
+ sub-optimal conditions.
+ <p>The tool scans all files and directories, starting from an indicated
+ root path. The following abnormal conditions are detected and handled:</p>
+ <ul>
+ <li>files with blocks that are completely missing from all datanodes.<br/>
+ In this case the tool can perform one of the following actions:
+ <ul>
+ <li>none ({@link NamenodeFsck#FIXING_NONE})</li>
+ <li>move corrupted files to the /lost+found directory on DFS
+ ({@link NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as
+ block chains, representing the longest consecutive series of valid blocks.</li>
+ <li>delete corrupted files ({@link NamenodeFsck#FIXING_DELETE})</li>
+ </ul>
+ </li>
+ <li>detect files with under-replicated or over-replicated blocks</li>
+ </ul>
+ Additionally, the tool collects detailed overall DFS statistics, and
+ optionally can print detailed statistics on the block locations and replication
+ factors of each file.
+ The tool also provides an option to filter open files during the scan.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DFSck -->
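+ <!-- Example: DFSck implements Tool, so it can be driven through ToolRunner;
+ a sketch, assuming a configured HDFS client (the path argument is
+ illustrative):
+
+ int rc = ToolRunner.run(new DFSck(new Configuration()), new String[] { "/" });
+ -->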
+ <!-- start class org.apache.hadoop.dfs.DistributedFileSystem -->
+ <class name="DistributedFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Permit paths which explicitly specify the default port.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename files/dirs]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete Path f, whether it is a true file or a directory.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Requires a boolean flag to be set to delete a non-empty
+ directory recursively.]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDiskStatus" return="org.apache.hadoop.dfs.DistributedFileSystem.DiskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the disk usage of the filesystem, including total capacity,
+ used space, and remaining space]]>
+ </doc>
+ </method>
+ <method name="getRawCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw capacity of the filesystem, disregarding
+ replication.]]>
+ </doc>
+ </method>
+ <method name="getRawUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total raw used space in the filesystem, disregarding
+ replication.]]>
+ </doc>
+ </method>
+ <method name="getDataNodeStats" return="org.apache.hadoop.dfs.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return statistics for each datanode.]]>
+ </doc>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Enter, leave or get safe mode.
+
+ @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(
+ FSConstants.SafeModeAction)]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalize the previously upgraded filesystem state.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.dfs.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[We need to find the blocks that didn't match. Likely only one
+ is corrupt but we will report both to the namenode. In the future,
+ we can consider figuring out exactly which block is corrupt.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the stat information about the file.
+ @throws FileNotFoundException if the file does not exist.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implementation of the abstract FileSystem for the DFS system.
+ This object is the way end-user code interacts with a Hadoop
+ DistributedFileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DistributedFileSystem -->
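+ <!-- Example: end-user code usually reaches DistributedFileSystem through the
+ abstract FileSystem API documented above; a minimal create/read/delete
+ sketch, assuming the default filesystem in the Configuration points at
+ an HDFS namenode:
+
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.get(conf);
+ Path p = new Path("/tmp/example.txt");
+ FSDataOutputStream out = fs.create(p);
+ out.writeUTF("hello");
+ out.close();
+ FSDataInputStream in = fs.open(p);
+ System.out.println(in.readUTF());
+ in.close();
+ fs.delete(p, false);  // the boolean form is required for non-empty directories
+ -->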
+ <!-- start class org.apache.hadoop.dfs.DistributedFileSystem.DiskStatus -->
+ <class name="DistributedFileSystem.DiskStatus" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedFileSystem.DiskStatus" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.DistributedFileSystem.DiskStatus -->
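+ <!-- Example: DiskStatus pairs with DistributedFileSystem.getDiskStatus()
+ above; a sketch, assuming "fs" is an open DistributedFileSystem:
+
+ DistributedFileSystem.DiskStatus ds = fs.getDiskStatus();
+ System.out.println(ds.getDfsUsed() + " of " + ds.getCapacity()
+     + " raw bytes used, " + ds.getRemaining() + " remaining");
+ -->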
+ <!-- start class org.apache.hadoop.dfs.FileDataServlet -->
+ <class name="FileDataServlet" extends="org.apache.hadoop.dfs.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileDataServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Service a GET request as described below.
+ Request:
+ {@code
+ GET http://<nn>:<port>/data[/<path>] HTTP/1.1
+ }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Redirect queries about the hosted filesystem to an appropriate datanode.
+ @see org.apache.hadoop.dfs.HftpFileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FileDataServlet -->
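+ <!-- Example request handled by FileDataServlet.doGet() above (host, port,
+ and path are illustrative); the servlet redirects the client to a
+ datanode that holds the data:
+
+ GET http://namenode.example.com:50070/data/user/alice/part-00000 HTTP/1.1
+ -->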
+ <!-- start class org.apache.hadoop.dfs.FsckServlet -->
+ <class name="FsckServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FsckServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in the Namesystem's Jetty server to run fsck on the namenode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FsckServlet -->
+ <!-- start interface org.apache.hadoop.dfs.FSConstants -->
+ <interface name="FSConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="MIN_BLOCKS_FOR_WRITE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_ERROR" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_HEARTBEAT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_BLOCKRECEIVED" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_BLOCKREPORT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_TRANSFERDATA" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_OPEN" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_STARTFILE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ADDBLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RENAMETO" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DELETE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_COMPLETEFILE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_LISTING" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_OBTAINLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RELEASELOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_EXISTS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ISDIR" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_MKDIRS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RENEW_LEASE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ABANDONBLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RAWSTATS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DATANODEREPORT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DATANODE_HINTS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_TRANSFERBLOCKS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_INVALIDATE_BLOCKS" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_FAILURE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_OPEN_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_STARTFILE_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ADDBLOCK_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RENAMETO_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DELETE_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_COMPLETEFILE_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_TRYAGAIN" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_LISTING_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_OBTAINLOCK_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RELEASELOCK_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_EXISTS_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ISDIR_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_MKDIRS_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RENEW_LEASE_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_ABANDONBLOCK_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_RAWSTATS_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DATANODEREPORT_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_CLIENT_DATANODE_HINTS_ACK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_WRITE_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_READ_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_READ_METADATA" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_REPLACE_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_COPY_BLOCK" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR_CHECKSUM" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR_INVALID" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_ERROR_EXISTS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OP_STATUS_CHECKSUM_OK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DATA_TRANSFER_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version for data transfers between clients and datanodes.
+ This should change when the serialization of DatanodeInfo changes,
+ not just when the protocol changes; that dependency is not obvious.]]>
+ </doc>
+ </field>
+ <field name="OPERATION_FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STILL_WAITING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE_SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCK_INVALIDATE_CHUNK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HEARTBEAT_INTERVAL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCKREPORT_INTERVAL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BLOCKREPORT_INITIAL_DELAY" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_SOFTLIMIT_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LEASE_HARDLIMIT_PERIOD" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_TIMEOUT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_TIMEOUT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_TIMEOUT_EXTENSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_PATH_LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_PATH_DEPTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SMALL_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_DATA_SOCKET_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SIZE_OF_INTEGER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAYOUT_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Some handy constants]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.FSConstants -->
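+ <!-- Example: DATA_TRANSFER_VERSION guards the client/datanode wire format; a
+ typical compatibility check (sketch; "remoteVersion" is an assumed local
+ variable read from the peer):
+
+ if (remoteVersion != FSConstants.DATA_TRANSFER_VERSION) {
+   throw new IOException("Data transfer version mismatch: expected "
+       + FSConstants.DATA_TRANSFER_VERSION + ", got " + remoteVersion);
+ }
+ -->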
+ <!-- start class org.apache.hadoop.dfs.FSConstants.CheckpointStates -->
+ <class name="FSConstants.CheckpointStates" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.CheckpointStates&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.CheckpointStates[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.CheckpointStates"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.CheckpointStates -->
+ <!-- start class org.apache.hadoop.dfs.FSConstants.DatanodeReportType -->
+ <class name="FSConstants.DatanodeReportType" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.DatanodeReportType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.DatanodeReportType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.DatanodeReportType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.DatanodeReportType -->
+ <!-- start class org.apache.hadoop.dfs.FSConstants.NodeType -->
+ <class name="FSConstants.NodeType" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.NodeType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.NodeType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Type of the node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.NodeType -->
+ <!-- start class org.apache.hadoop.dfs.FSConstants.SafeModeAction -->
+ <class name="FSConstants.SafeModeAction" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.SafeModeAction&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.SafeModeAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.SafeModeAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.SafeModeAction -->
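+ <!-- Example: SafeModeAction is the argument type consumed by
+ DistributedFileSystem.setSafeMode() above; a sketch, assuming "fs" is an
+ open DistributedFileSystem and that the enum constant names follow the
+ -safemode subcommands (enter/leave/get):
+
+ boolean inSafeMode = fs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
+ -->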
+ <!-- start class org.apache.hadoop.dfs.FSConstants.StartupOption -->
+ <class name="FSConstants.StartupOption" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.StartupOption&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.StartupOption[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.StartupOption"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.StartupOption -->
+ <!-- start class org.apache.hadoop.dfs.FSConstants.UpgradeAction -->
+ <class name="FSConstants.UpgradeAction" extends="java.lang.Enum&lt;org.apache.hadoop.dfs.FSConstants.UpgradeAction&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.dfs.FSConstants.UpgradeAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.UpgradeAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Distributed upgrade actions:
+
+ 1. Get upgrade status.
+ 2. Get detailed upgrade status.
+ 3. Proceed with the upgrade if it is stuck, no matter what the status is.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSConstants.UpgradeAction -->
+ <!-- start interface org.apache.hadoop.dfs.FSDatasetInterface -->
+ <interface name="FSDatasetInterface" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean"/>
+ <method name="getMetaDataLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the length of the metadata file of the specified block
+ @param b - the block for which the metadata length is desired
+ @return the length of the metadata file for the specified block.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMetaDataInputStream" return="org.apache.hadoop.dfs.FSDatasetInterface.MetaDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the metadata of block b as an input stream (and its length)
+ @param b - the block
+ @return the metadata input stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="metaFileExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Does the meta file exist for this block?
+ @param b - the block
+ @return true if the metafile for the specified block exists
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the specified block's on-disk length (excluding metadata)
+ @param b
+ @return the specified block's on-disk length (excluding metadata)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getStoredBlock" return="org.apache.hadoop.dfs.Block"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blkid" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return the generation stamp stored with the block.]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream to read the contents of the specified block
+ @param b
+ @return an input stream to read the contents of the specified block
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <param name="seekOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns an input stream at specified offset of the specified block
+ @param b
+ @param seekOffset
+ @return an input stream to read the contents of the specified block,
+ starting at the offset
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeToBlock" return="org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <param name="isRecovery" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the block and returns output streams to write data and CRC
+ @param b
+ @param isRecovery True if this is part of error recovery, otherwise false
+ @return a BlockWriteStreams object to allow writing the block data
+ and CRC
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="updateBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="oldblock" type="org.apache.hadoop.dfs.Block"/>
+ <param name="newblock" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Update the block to the new generation stamp and length.]]>
+ </doc>
+ </method>
+ <method name="finalizeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finalizes the block previously opened for writing using writeToBlock.
+ The block size is taken from the parameter b and must match the amount
+ of data written.
+ @param b
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unfinalizeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unfinalizes the block previously opened for writing using writeToBlock.
+ The temporary file associated with this block is deleted.
+ @param b
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getBlockReport" return="org.apache.hadoop.dfs.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the block report - the full list of blocks stored
+ @return - the block report - the full list of blocks stored]]>
+ </doc>
+ </method>
+ <method name="isValidBlock" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <doc>
+ <![CDATA[Is the block valid?
+ @param b
+ @return - true if the specified block is valid]]>
+ </doc>
+ </method>
+ <method name="invalidate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="invalidBlks" type="org.apache.hadoop.dfs.Block[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Invalidates the specified blocks
+ @param invalidBlks - the blocks to be invalidated
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkDataDir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ <doc>
+ <![CDATA[Check if all the data directories are healthy
+ @throws DiskErrorException]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stringifies the name of the storage]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shutdown the FSDataset]]>
+ </doc>
+ </method>
+ <method name="getChannelPosition" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <param name="stream" type="org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current offset in the data stream.
+ @param b
+ @param stream The stream to the data file and checksum file
+ @return the position of the file pointer in the data stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setChannelPosition"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <param name="stream" type="org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams"/>
+ <param name="dataOffset" type="long"/>
+ <param name="ckOffset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets the file pointer of the data stream and checksum stream to
+ the specified values.
+ @param b
+ @param stream The stream for the data file and checksum file
+ @param dataOffset The position to which the file pointer for the data stream
+ should be set
+ @param ckOffset The position to which the file pointer for the checksum stream
+ should be set
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is an interface for the underlying storage that stores blocks for
+ a data node.
+ Examples are the FSDataset (which stores blocks on dirs) and
+ SimulatedFSDataset (which simulates data).]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.FSDatasetInterface -->
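+ <!-- Example: the write path implied by the methods above is writeToBlock,
+ then writing data and CRC to the returned streams, then finalizeBlock; a
+ sketch, assuming "dataset" implements FSDatasetInterface and "b" is the
+ Block being written:
+
+ FSDatasetInterface.BlockWriteStreams streams = dataset.writeToBlock(b, false);
+ // ... write block data and checksums to the two streams ...
+ dataset.finalizeBlock(b);  // the length in b must match the bytes written
+ -->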
+ <!-- start class org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams -->
+ <class name="FSDatasetInterface.BlockWriteStreams" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This class contains the output streams for the data and checksum
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams -->
+ <!-- start class org.apache.hadoop.dfs.FSDatasetInterface.MetaDataInputStream -->
+ <class name="FSDatasetInterface.MetaDataInputStream" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides the input stream and length of the metadata
+ of a block]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSDatasetInterface.MetaDataInputStream -->
+ <!-- start class org.apache.hadoop.dfs.FSNamesystemMetrics -->
+ <class name="FSNamesystemMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.
+ We set the metrics values within this function before pushing them out.
+ FSNamesystem updates its own local variables, which are
+ lightweight compared to Metrics counters.
+
+ Some of the metrics are explicitly cast to int, because a few metrics
+ collectors do not handle long values. It is safe to cast to int for now,
+ as all these values fit in an int.
+ Metrics related to DFS capacity are stored in bytes, which do not fit in
+ an int, so they are rounded to GB.]]>
+ </doc>
+ </method>
+ <field name="filesTotal" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksTotal" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="capacityTotalGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="capacityUsedGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="capacityRemainingGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="totalLoad" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="pendingReplicationBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="underReplicatedBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="scheduledReplicationBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various FSNamesystem status metrics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values,
+ for example:
+ <p> {@link #filesTotal}.set()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.FSNamesystemMetrics -->
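+ <!-- Example: a minimal sketch of a registered updater in the style of
+      FSNamesystemMetrics, assuming the 0.20-era metrics API
+      (MetricsUtil.getContext/createRecord, MetricsContext.registerUpdater,
+      MetricsIntValue.set/pushMetric); the class name MyMetrics and the
+      record name "mynamesystem" are hypothetical.
+
+      import org.apache.hadoop.metrics.MetricsContext;
+      import org.apache.hadoop.metrics.MetricsRecord;
+      import org.apache.hadoop.metrics.MetricsUtil;
+      import org.apache.hadoop.metrics.Updater;
+      import org.apache.hadoop.metrics.util.MetricsIntValue;
+
+      public class MyMetrics implements Updater {
+        // publicly accessible metrics variable, updated via set()
+        public final MetricsIntValue filesTotal = new MetricsIntValue("FilesTotal");
+        private final MetricsRecord metricsRecord;
+
+        public MyMetrics() {
+          MetricsContext context = MetricsUtil.getContext("dfs");
+          metricsRecord = MetricsUtil.createRecord(context, "mynamesystem");
+          context.registerUpdater(this);   // doUpdates() is now called periodically
+        }
+
+        public void doUpdates(MetricsContext unused) {
+          synchronized (this) {
+            filesTotal.set(42);                    // i.e. {@link #filesTotal}.set()
+            filesTotal.pushMetric(metricsRecord);  // copy the value into the record
+          }
+          metricsRecord.update();                  // push the record out
+        }
+      }
+ -->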
+ <!-- start class org.apache.hadoop.dfs.GetImageServlet -->
+ <class name="GetImageServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GetImageServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in the Namesystem's Jetty server to retrieve a file.
+ It is typically used by the Secondary NameNode to retrieve the image and
+ edits files for periodic checkpointing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.GetImageServlet -->
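+ <!-- Example: a sketch of fetching the current image from this servlet over
+      HTTP, assuming it is mounted at /getimage and understands a getimage=1
+      query parameter as used by the Secondary NameNode (the URL mapping and
+      the host/port are assumptions, not confirmed by this file).
+
+      import java.io.FileOutputStream;
+      import java.io.InputStream;
+      import java.io.OutputStream;
+      import java.net.URL;
+
+      public class FetchImage {
+        public static void main(String[] args) throws Exception {
+          URL url = new URL("http://namenode.example.com:50070/getimage?getimage=1");
+          InputStream in = url.openStream();
+          OutputStream out = new FileOutputStream("fsimage.copy");
+          byte[] buf = new byte[4096];
+          int n;
+          while ((n = in.read(buf)) > 0) {
+            out.write(buf, 0, n);   // copy the image to a local file
+          }
+          out.close();
+          in.close();
+        }
+      }
+ -->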
+ <!-- start class org.apache.hadoop.dfs.HftpFileSystem -->
+ <class name="HftpFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HftpFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="openConnection" return="java.net.HttpURLConnection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="query" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open an HTTP connection to the namenode to read file data and metadata.
+ @param path The path component of the URL
+ @param query The query component of the URL]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="buffersize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="nnAddr" type="java.net.InetSocketAddress"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ugi" type="org.apache.hadoop.security.UserGroupInformation"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="df" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of a protocol for accessing filesystems over HTTP.
+ This implementation provides a limited, read-only interface
+ to a filesystem over HTTP.
+ @see org.apache.hadoop.dfs.ListPathsServlet
+ @see org.apache.hadoop.dfs.FileDataServlet]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.HftpFileSystem -->
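+ <!-- Example: a sketch of read-only access through HftpFileSystem, using only
+      methods documented above; it assumes the "hftp" URI scheme is mapped to
+      this class in the configuration, and the host/port are placeholders.
+
+      import java.net.URI;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileStatus;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class HftpListing {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs =
+              FileSystem.get(URI.create("hftp://namenode.example.com:50070/"), conf);
+          for (FileStatus stat : fs.listStatus(new Path("/"))) {
+            System.out.println(stat.getPath() + " " + stat.getLen());
+          }
+          // write operations (create, append, ...) fail: the interface is read-only
+        }
+      }
+ -->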
+ <!-- start class org.apache.hadoop.dfs.HsftpFileSystem -->
+ <class name="HsftpFileSystem" extends="org.apache.hadoop.dfs.HftpFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HsftpFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="openConnection" return="java.net.HttpURLConnection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="query" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of a protocol for accessing filesystems over HTTPS.
+ This implementation provides a limited, read-only interface
+ to a filesystem over HTTPS.
+ @see org.apache.hadoop.dfs.ListPathsServlet
+ @see org.apache.hadoop.dfs.FileDataServlet]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.HsftpFileSystem -->
+ <!-- start class org.apache.hadoop.dfs.JspHelper -->
+ <class name="JspHelper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JspHelper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="randomNode" return="org.apache.hadoop.dfs.DatanodeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="bestNode" return="org.apache.hadoop.dfs.DatanodeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blk" type="org.apache.hadoop.dfs.LocatedBlock"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="streamBlockInAscii"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="blockId" type="long"/>
+ <param name="genStamp" type="long"/>
+ <param name="blockSize" type="long"/>
+ <param name="offsetIntoBlock" type="long"/>
+ <param name="chunkSizeToView" type="long"/>
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="DFSNodesStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="live" type="java.util.ArrayList&lt;org.apache.hadoop.dfs.DatanodeDescriptor&gt;"/>
+ <param name="dead" type="java.util.ArrayList&lt;org.apache.hadoop.dfs.DatanodeDescriptor&gt;"/>
+ </method>
+ <method name="addTableHeader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableRow"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="columns" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableRow"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="columns" type="java.lang.String[]"/>
+ <param name="row" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addTableFooter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSafeModeText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInodeLimitText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUpgradeStatusText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="sortNodeList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodes" type="java.util.ArrayList&lt;org.apache.hadoop.dfs.DatanodeDescriptor&gt;"/>
+ <param name="field" type="java.lang.String"/>
+ <param name="order" type="java.lang.String"/>
+ </method>
+ <method name="printPathWithLinks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.lang.String"/>
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="namenodeInfoPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="printGotoForm"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="namenodeInfoPort" type="int"/>
+ <param name="file" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createTitle"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="javax.servlet.jsp.JspWriter"/>
+ <param name="req" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="file" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="percentageGraph" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="perc" type="int"/>
+ <param name="width" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="percentageGraph" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="perc" type="float"/>
+ <param name="width" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="WEB_UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="conf" type="org.apache.hadoop.conf.Configuration"
+ transient="false" volatile="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="webUGI" type="org.apache.hadoop.security.UnixUserGroupInformation"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.JspHelper -->
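+ <!-- Example: a sketch of using the helper from a NameNode web-UI JSP
+      scriptlet, where 'out' is the implicit JspWriter; the 75/100 arguments
+      are illustrative.
+
+      JspHelper helper = new JspHelper();
+      out.println(helper.getSafeModeText());           // safe-mode status line
+      out.println(helper.getInodeLimitText());
+      out.println(JspHelper.percentageGraph(75, 100)); // HTML bar: 75%, 100px wide
+ -->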
+ <!-- start class org.apache.hadoop.dfs.LeaseExpiredException -->
+ <class name="LeaseExpiredException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LeaseExpiredException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The lease that was being used to create this file has expired.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.LeaseExpiredException -->
+ <!-- start class org.apache.hadoop.dfs.ListPathsServlet -->
+ <class name="ListPathsServlet" extends="org.apache.hadoop.dfs.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ListPathsServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="buildRoot" return="java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
+ <doc>
+ <![CDATA[Build a map from the query string, setting values and defaults.]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Service a GET request as described below.
+ Request:
+ {@code
+ GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*] HTTP/1.1
+ }
+
+ Where each <i>option</i> (with its default) is one of:
+ recursive (&quot;no&quot;)
+ filter (&quot;.*&quot;)
+ exclude (&quot;\..*\.crc&quot;)
+
+ Response: A flat list of files/directories in the following format:
+ {@code
+ <listing path="..." recursive="(yes|no)" filter="..."
+ time="yyyy-MM-dd hh:mm:ss UTC" version="...">
+ <directory path="..." modified="yyyy-MM-dd hh:mm:ss"/>
+ <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" blocksize="..."
+ replication="..." size="..."/>
+ </listing>
+ }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Obtain meta-information about a filesystem.
+ @see org.apache.hadoop.dfs.HftpFileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.ListPathsServlet -->
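+ <!-- Example: a sketch of issuing the GET request documented above and
+      printing the raw listing XML; the host and port are placeholders.
+
+      import java.io.BufferedReader;
+      import java.io.InputStreamReader;
+      import java.net.URL;
+
+      public class ListPathsClient {
+        public static void main(String[] args) throws Exception {
+          // recursive listing of /user, per the request format documented above
+          URL url =
+              new URL("http://namenode.example.com:50070/listPaths/user?recursive=yes");
+          BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream()));
+          String line;
+          while ((line = in.readLine()) != null) {
+            System.out.println(line);   // the <listing> document described above
+          }
+          in.close();
+        }
+      }
+ -->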
+ <!-- start class org.apache.hadoop.dfs.LocatedBlocks -->
+ <class name="LocatedBlocks" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getLocatedBlocks" return="java.util.List&lt;org.apache.hadoop.dfs.LocatedBlock&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get located blocks.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.dfs.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ <doc>
+ <![CDATA[Get located block.]]>
+ </doc>
+ </method>
+ <method name="locatedBlockCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get number of located blocks.]]>
+ </doc>
+ </method>
+ <method name="getFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isUnderConstruction" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if the file was under construction when
+ this LocatedBlocks was constructed, and false otherwise.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Collection of blocks with their locations and the file length.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.LocatedBlocks -->
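+ <!-- Example: a sketch of walking a LocatedBlocks collection with the
+      accessors documented above; BlockPrinter is a hypothetical caller, and
+      the instance would come from, e.g., NameNode.getBlockLocations().
+
+      import org.apache.hadoop.dfs.LocatedBlock;
+      import org.apache.hadoop.dfs.LocatedBlocks;
+
+      public class BlockPrinter {
+        static void printBlocks(LocatedBlocks blocks) {
+          System.out.println("file length: " + blocks.getFileLength()
+              + ", under construction: " + blocks.isUnderConstruction());
+          for (int i = 0; i < blocks.locatedBlockCount(); i++) {
+            LocatedBlock blk = blocks.get(i);
+            System.out.println("block " + i + ": " + blk);
+          }
+        }
+      }
+ -->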
+ <!-- start class org.apache.hadoop.dfs.NameNode -->
+ <class name="NameNode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.ClientProtocol"/>
+ <implements name="org.apache.hadoop.dfs.DatanodeProtocol"/>
+ <implements name="org.apache.hadoop.dfs.NamenodeProtocol"/>
+ <implements name="org.apache.hadoop.dfs.FSConstants"/>
+ <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start NameNode.
+ <p>
+ The name-node can be started with one of the following startup options:
+ <ul>
+ <li>{@link FSConstants.StartupOption#REGULAR REGULAR} - normal startup</li>
+ <li>{@link FSConstants.StartupOption#FORMAT FORMAT} - format name node</li>
+ <li>{@link FSConstants.StartupOption#UPGRADE UPGRADE} - start the cluster
+ upgrade and create a snapshot of the current file system state</li>
+ <li>{@link FSConstants.StartupOption#ROLLBACK ROLLBACK} - roll the
+ cluster back to the previous state</li>
+ </ul>
+ The option is passed via configuration field:
+ <tt>dfs.namenode.startup</tt>
+
+ The conf will be modified to reflect the actual ports on which
+ the NameNode is up and running if the user passes the port as
+ <code>zero</code> in the conf.
+
+ @param conf configuration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="NameNode" type="java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a NameNode at the specified location and start it.
+
+ The conf will be modified to reflect the actual ports on which
+ the NameNode is up and running if the user passes the port as
+ <code>zero</code>.]]>
+ </doc>
+ </constructor>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="format"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Format a new filesystem. Destroys any filesystem that may already
+ exist at this location.]]>
+ </doc>
+ </method>
+ <method name="getNameNodeMetrics" return="org.apache.hadoop.dfs.NameNodeMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Wait for service to finish.
+ (Normally, it runs forever.)]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all NameNode threads and wait for all to finish.]]>
+ </doc>
+ </method>
+ <method name="getBlocks" return="org.apache.hadoop.dfs.BlocksWithLocations"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="datanode" type="org.apache.hadoop.dfs.DatanodeInfo"/>
+ <param name="size" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a list of blocks & their locations on <code>datanode</code>
+ whose total size is <code>size</code>.
+
+ @param datanode on which blocks are located
+ @param size total size of blocks]]>
+ </doc>
+ </method>
+ <method name="getBlockLocations" return="org.apache.hadoop.dfs.LocatedBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="offset" type="long"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="clientName" type="java.lang.String"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="permissions" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="addBlock" return="org.apache.hadoop.dfs.LocatedBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="abandonBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="org.apache.hadoop.dfs.Block"/>
+ <param name="src" type="java.lang.String"/>
+ <param name="holder" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client needs to give up on the block.]]>
+ </doc>
+ </method>
+ <method name="complete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="reportBadBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blocks" type="org.apache.hadoop.dfs.LocatedBlock[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The client has detected an error on the specified located blocks
+ and is reporting them to the server. For now, the namenode will
+ mark the block as corrupt. In the future we might
+ check whether the blocks are actually corrupt.]]>
+ </doc>
+ </method>
+ <method name="nextGenerationStamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.dfs.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="commitBlockSynchronization"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.dfs.Block"/>
+ <param name="newgenerationstamp" type="long"/>
+ <param name="newlength" type="long"/>
+ <param name="closeFile" type="boolean"/>
+ <param name="deleteblock" type="boolean"/>
+ <param name="newtargets" type="org.apache.hadoop.dfs.DatanodeID[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getPreferredBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="dst" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="renewLease"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getListing" return="org.apache.hadoop.dfs.DFSFileInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileInfo" return="org.apache.hadoop.dfs.DFSFileInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the file info for a specific file.
+ @param src The string representation of the path to the file
+ @throws IOException if permission to access file is denied by the system
+ @return object containing information regarding the file
+ or null if file not found]]>
+ </doc>
+ </method>
+ <method name="getStats" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDatanodeReport" return="org.apache.hadoop.dfs.DatanodeInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="org.apache.hadoop.dfs.FSConstants.DatanodeReportType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.SafeModeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isInSafeMode" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the cluster currently in safe mode?]]>
+ </doc>
+ </method>
+ <method name="refreshNodes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getEditLogSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the size of the current edit log.]]>
+ </doc>
+ </method>
+ <method name="rollEditLog" return="org.apache.hadoop.dfs.CheckpointSignature"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Roll the edit log.]]>
+ </doc>
+ </method>
+ <method name="rollFsImage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Roll the fsImage.]]>
+ </doc>
+ </method>
+ <method name="finalizeUpgrade"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="distributedUpgradeProgress" return="org.apache.hadoop.dfs.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="action" type="org.apache.hadoop.dfs.FSConstants.UpgradeAction"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="metaSave"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Dumps the namenode state into the specified file.]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setQuota"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="quota" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="clearQuota"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="fsync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.lang.String"/>
+ <param name="clientName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="register" return="org.apache.hadoop.dfs.DatanodeRegistration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="sendHeartbeat" return="org.apache.hadoop.dfs.DatanodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <param name="capacity" type="long"/>
+ <param name="dfsUsed" type="long"/>
+ <param name="remaining" type="long"/>
+ <param name="xmitsInProgress" type="int"/>
+ <param name="xceiverCount" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A data node notifies the name node that it is alive.
+ Returns a block-oriented command for the datanode to execute.
+ This will be either a transfer or a delete operation.]]>
+ </doc>
+ </method>
+ <method name="blockReport" return="org.apache.hadoop.dfs.DatanodeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <param name="blocks" type="long[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockReceived"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <param name="blocks" type="org.apache.hadoop.dfs.Block[]"/>
+ <param name="delHints" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="errorReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <param name="errorCode" type="int"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="versionRequest" return="org.apache.hadoop.dfs.NamespaceInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="processUpgradeCommand" return="org.apache.hadoop.dfs.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="comm" type="org.apache.hadoop.dfs.UpgradeCommand"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="verifyRequest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify the request.
+
+ Verifies the correctness of the datanode version and registration ID,
+ and checks that the datanode does not need to be shut down.
+
+ @param nodeReg data node registration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="verifyVersion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="version" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify version.
+
+ @param version
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFsImageName" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the name of the fsImage file]]>
+ </doc>
+ </method>
+ <method name="getFsImageNameCheckpoint" return="java.io.File[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the name of the fsImage file uploaded by periodic
+ checkpointing]]>
+ </doc>
+ </method>
+ <method name="getNameNodeAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the address on which the NameNode is listening.
+ @return the address on which the NameNode is listening]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DEFAULT_PORT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="stateChangeLog" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[NameNode serves as both directory namespace manager and
+ "inode table" for the Hadoop DFS. There is a single NameNode
+ running in any DFS deployment. (Well, except when there
+ is a second backup/failover NameNode.)
+
+ The NameNode controls two critical tables:
+ 1) filename->blocksequence (namespace)
+ 2) block->machinelist ("inodes")
+
+ The first table is stored on disk and is very precious.
+ The second table is rebuilt every time the NameNode comes
+ up.
+
+ 'NameNode' refers to both this class as well as the 'NameNode server'.
+ The 'FSNamesystem' class actually performs most of the filesystem
+ management. The majority of the 'NameNode' class itself is concerned
+ with exposing the IPC interface to the outside world, plus some
+ configuration management.
+
+ NameNode implements the ClientProtocol interface, which allows
+ clients to ask for DFS services. ClientProtocol is not
+ designed for direct use by authors of DFS client code. End-users
+ should instead use the org.apache.hadoop.fs.FileSystem class.
+
+ NameNode also implements the DatanodeProtocol interface, used by
+ DataNode programs that actually store DFS data blocks. These
+ methods are invoked repeatedly and automatically by all the
+ DataNodes in a DFS deployment.
+
+ NameNode also implements the NamenodeProtocol interface, used by
+ secondary namenodes or rebalancing processes to get partial namenode
+ state, for example a partial blocksMap.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.NameNode -->
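+ <!-- Example: a minimal sketch of formatting and starting a NameNode
+      in-process, using only the constructors and methods documented above;
+      whether to format is deployment-specific, so that step is commented out.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.dfs.NameNode;
+
+      public class RunNameNode {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          // NameNode.format(conf); // one-time step: destroys any existing filesystem
+          NameNode namenode = new NameNode(conf); // conf ports updated if given as zero
+          namenode.join();                        // normally runs forever
+        }
+      }
+ -->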
+ <!-- start class org.apache.hadoop.dfs.NamenodeFsck -->
+ <class name="NamenodeFsck" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NamenodeFsck" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.dfs.NameNode, java.util.Map&lt;java.lang.String, java.lang.String[]&gt;, javax.servlet.http.HttpServletResponse"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filesystem checker.
+ @param conf configuration (namenode config)
+ @param nn namenode that this fsck is going to use
+ @param pmap key=value[] map that is passed to the http servlet as url parameters
+ @param response the object into which this servlet writes the URL contents
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="fsck"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check files on DFS, starting from the indicated path.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FIXING_NONE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Don't attempt any fixing.]]>
+ </doc>
+ </field>
+ <field name="FIXING_MOVE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Move corrupted files to /lost+found.]]>
+ </doc>
+ </field>
+ <field name="FIXING_DELETE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete corrupted files.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
+ sub-optimal conditions.
+ <p>The tool scans all files and directories, starting from an indicated
+ root path. The following abnormal conditions are detected and handled:</p>
+ <ul>
+ <li>files with blocks that are completely missing from all datanodes.<br/>
+ In this case the tool can perform one of the following actions:
+ <ul>
+ <li>none ({@link #FIXING_NONE})</li>
+ <li>move corrupted files to the /lost+found directory on DFS
+ ({@link #FIXING_MOVE}). Remaining data blocks are saved as
+ block chains, representing the longest consecutive series of valid blocks.</li>
+ <li>delete corrupted files ({@link #FIXING_DELETE})</li>
+ </ul>
+ </li>
+ <li>detect files with under-replicated or over-replicated blocks</li>
+ </ul>
+ Additionally, the tool collects detailed overall DFS statistics, and
+ can optionally print detailed statistics on the block locations and
+ replication factors of each file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.NamenodeFsck -->
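+ <!-- Example: a sketch of running a check with the constructor and fsck()
+      method documented above; 'nn' and 'response' would come from the hosting
+      servlet, and the "path" parameter name in pmap is illustrative, not
+      confirmed by this file.
+
+      import java.util.HashMap;
+      import java.util.Map;
+      import javax.servlet.http.HttpServletResponse;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.dfs.NameNode;
+      import org.apache.hadoop.dfs.NamenodeFsck;
+
+      public class FsckRunner {
+        static void check(Configuration conf, NameNode nn,
+            HttpServletResponse response) throws Exception {
+          Map<String, String[]> pmap = new HashMap<String, String[]>();
+          pmap.put("path", new String[] { "/" }); // start checking from the root
+          new NamenodeFsck(conf, nn, pmap, response).fsck();
+        }
+      }
+ -->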
+ <!-- start class org.apache.hadoop.dfs.NamenodeFsck.FsckResult -->
+ <class name="NamenodeFsck.FsckResult" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NamenodeFsck.FsckResult"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isHealthy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[DFS is considered healthy if there are no missing blocks.]]>
+ </doc>
+ </method>
+ <method name="addMissing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Add a missing block name, plus its size.]]>
+ </doc>
+ </method>
+ <method name="getMissingIds" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a list of missing block names (as list of Strings).]]>
+ </doc>
+ </method>
+ <method name="getMissingSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total size of missing data, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setMissingSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="missingSize" type="long"/>
+ </method>
+ <method name="getExcessiveReplicas" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of over-replicated blocks.]]>
+ </doc>
+ </method>
+ <method name="setExcessiveReplicas"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="overReplicatedBlocks" type="long"/>
+ </method>
+ <method name="getReplicationFactor" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the actual replication factor.]]>
+ </doc>
+ </method>
+ <method name="getMissingReplicas" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of under-replicated blocks. Note: missing blocks are not counted here.]]>
+ </doc>
+ </method>
+ <method name="setMissingReplicas"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="underReplicatedBlocks" type="long"/>
+ </method>
+ <method name="getTotalDirs" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total number of directories encountered during this scan.]]>
+ </doc>
+ </method>
+ <method name="setTotalDirs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalDirs" type="long"/>
+ </method>
+ <method name="getTotalFiles" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total number of files encountered during this scan.]]>
+ </doc>
+ </method>
+ <method name="setTotalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalFiles" type="long"/>
+ </method>
+ <method name="getTotalOpenFiles" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of files open for write encountered during this scan.]]>
+ </doc>
+ </method>
+ <method name="setTotalOpenFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalOpenFiles" type="long"/>
+ <doc>
+ <![CDATA[Set total number of open files encountered during this scan.]]>
+ </doc>
+ </method>
+ <method name="getTotalSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total size of scanned data, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setTotalSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalSize" type="long"/>
+ </method>
+ <method name="getTotalOpenFilesSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return total size of open files data, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setTotalOpenFilesSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalOpenFilesSize" type="long"/>
+ </method>
+ <method name="getReplication" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the intended replication factor, against which the over/under-
+ replicated blocks are counted. Note: this value comes from the current
+ Configuration supplied to the tool, so it may differ from the
+ value in the DFS Configuration.]]>
+ </doc>
+ </method>
+ <method name="setReplication"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="replication" type="int"/>
+ </method>
+ <method name="getTotalBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of blocks in the scanned area.]]>
+ </doc>
+ </method>
+ <method name="setTotalBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalBlocks" type="long"/>
+ </method>
+ <method name="getTotalOpenFilesBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of blocks held by open files.]]>
+ </doc>
+ </method>
+ <method name="setTotalOpenFilesBlocks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="totalOpenFilesBlocks" type="long"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCorruptFiles" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of corrupted files.]]>
+ </doc>
+ </method>
+ <method name="setCorruptFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="corruptFiles" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[Result of an fsck scan, plus overall DFS statistics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.NamenodeFsck.FsckResult -->
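+ <!-- Example (a minimal sketch): a caller inspecting the result of a scan.
+ "res" is an assumed NamenodeFsck.FsckResult instance; only getters listed
+ above are used:
+
+   System.out.println(res);              // toString() summarizes the scan
+   long files  = res.getTotalFiles();    // files encountered
+   long blocks = res.getTotalBlocks();   // blocks in the scanned area
+   long bad    = res.getCorruptFiles();  // corrupted files found
+   System.out.println(bad + " corrupt of " + files + " files, " + blocks + " blocks");
+ -->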
+ <!-- start class org.apache.hadoop.dfs.NameNodeMetrics -->
+ <class name="NameNodeMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="numFilesCreated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numGetBlockLocations" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numFilesRenamed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numCreateFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numDeleteFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numAddBlockOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="transactions" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="syncs" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockReport" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="safeModeTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="fsImageLoadTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numBlocksCorrupted" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various NameNode statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #syncs}.inc()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.NameNodeMetrics -->
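+ <!-- Example (a minimal sketch): as the class doc above notes, callers update the
+ public metric fields directly. "metrics" is an assumed NameNodeMetrics instance:
+
+   metrics.numFilesCreated.inc();    // one more file created in this interval
+   metrics.numGetListingOps.inc();   // one more getListing operation served
+ -->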
+ <!-- start class org.apache.hadoop.dfs.NotReplicatedYetException -->
+ <class name="NotReplicatedYetException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NotReplicatedYetException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[The file has not finished being written to enough datanodes yet.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.NotReplicatedYetException -->
+ <!-- start class org.apache.hadoop.dfs.QuotaExceededException -->
+ <class name="QuotaExceededException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="QuotaExceededException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="QuotaExceededException" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPathName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class is for the error when an attempt to add an inode to the namespace
+ violates the quota restriction of any inode on the path to the newly added
+ inode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.QuotaExceededException -->
+ <!-- start class org.apache.hadoop.dfs.SafeModeException -->
+ <class name="SafeModeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SafeModeException" type="java.lang.String, org.apache.hadoop.dfs.FSNamesystem.SafeModeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when the name node is in safe mode.
+ Clients cannot modify the namespace until safe mode is off.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.SafeModeException -->
+ <!-- start class org.apache.hadoop.dfs.SecondaryNameNode -->
+ <class name="SecondaryNameNode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.FSConstants"/>
+ <implements name="java.lang.Runnable"/>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down this instance of the secondary name node.
+ Returns only after shutdown is complete.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() is the command-line entry point for the Secondary NameNode daemon.
+ @param argv Command line parameters.
+ @exception Exception if the filesystem does not exist.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The Secondary NameNode is a helper to the primary NameNode.
+ The Secondary is responsible for supporting periodic checkpoints
+ of the HDFS metadata. The current design allows only one Secondary
+ NameNode per HDFS cluster.
+
+ The Secondary NameNode is a daemon that periodically wakes
+ up (determined by the schedule specified in the configuration),
+ triggers a periodic checkpoint and then goes back to sleep.
+ The Secondary NameNode uses the ClientProtocol to talk to the
+ primary NameNode.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.SecondaryNameNode -->
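+ <!-- Example (a minimal sketch): since main() is the command-line entry point, the
+ checkpoint daemon can also be started in-process; the empty argument array is
+ an assumption for the example:
+
+   SecondaryNameNode.main(new String[] {});   // schedule comes from the Configuration
+ -->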
+ <!-- start class org.apache.hadoop.dfs.StreamFile -->
+ <class name="StreamFile" extends="org.apache.hadoop.dfs.DfsServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StreamFile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.StreamFile -->
+ <!-- start interface org.apache.hadoop.dfs.Upgradeable -->
+ <interface name="Upgradeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable&lt;org.apache.hadoop.dfs.Upgradeable&gt;"/>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the layout version of the upgrade object.
+ @return layout version]]>
+ </doc>
+ </method>
+ <method name="getType" return="org.apache.hadoop.dfs.FSConstants.NodeType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of the software component that this object is upgrading.
+ @return type]]>
+ </doc>
+ </method>
+ <method name="getDescription" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Description of the upgrade object for displaying.
+ @return description]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Upgrade status reports the percentage of the work done out of the total
+ amount required by the upgrade.
+
+ 100% means that the upgrade is completed.
+ Any value < 100 means it is not complete.
+
+ Implementations should provide at least 2 values, e.g. 0 and 100.
+ @return integer value in the range [0, 100].]]>
+ </doc>
+ </method>
+ <method name="startUpgrade" return="org.apache.hadoop.dfs.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Prepare for the upgrade.
+ E.g. initialize upgrade data structures and set status to 0.
+
+ Returns an upgrade command that is used for broadcasting to other cluster
+ components.
+ E.g. name-node informs data-nodes that they must perform a distributed upgrade.
+
+ @return an UpgradeCommand for broadcasting.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="completeUpgrade" return="org.apache.hadoop.dfs.UpgradeCommand"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Complete upgrade.
+ E.g. cleanup upgrade data structures or write metadata to disk.
+
+ Returns an upgrade command that is used for broadcasting to other cluster
+ components.
+ E.g. data-nodes inform the name-node that they completed the upgrade
+ while other data-nodes are still upgrading.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatusReport" return="org.apache.hadoop.dfs.UpgradeStatusReport"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status report for the upgrade.
+
+ @param details true if upgradeStatus details need to be included,
+ false otherwise
+ @return {@link UpgradeStatusReport}
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Common interface for distributed upgrade objects.
+
+ Each upgrade object corresponds to a layout version,
+ which is the latest version that should be upgraded using this object.
+ That is, all components whose layout version is greater than or equal to the
+ one returned by {@link #getVersion()} must be upgraded with this object.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.Upgradeable -->
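+ <!-- Example (a minimal sketch of an implementation; every name below is
+ hypothetical): an upgrade object reports the layout version it handles, the
+ component type, and its progress in the range [0, 100]:
+
+   class ExampleNameNodeUpgrade implements Upgradeable {
+     private short status = 0;                          // 0 = not started
+     public int getVersion() { return -13; }            // layout version handled
+     public FSConstants.NodeType getType() { return FSConstants.NodeType.NAME_NODE; }
+     public String getDescription() { return "Example distributed upgrade"; }
+     public short getUpgradeStatus() { return status; }
+     public UpgradeCommand startUpgrade() throws IOException {
+       status = 0;                     // initialize upgrade state
+       return new UpgradeCommand();    // broadcast command (no-arg constructor assumed)
+     }
+     public UpgradeCommand completeUpgrade() throws IOException {
+       status = 100;                   // write final metadata here
+       return null;                    // nothing left to broadcast
+     }
+     public UpgradeStatusReport getUpgradeStatusReport(boolean details)
+         throws IOException {
+       return new UpgradeStatusReport(getVersion(), status, status == 100);
+     }
+     public int compareTo(Upgradeable o) {   // ordering by layout version
+       return getVersion() - o.getVersion();
+     }
+   }
+ -->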
+ <!-- start class org.apache.hadoop.dfs.UpgradeStatusReport -->
+ <class name="UpgradeStatusReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="UpgradeStatusReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UpgradeStatusReport" type="int, short, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the layout version of the currently running upgrade.
+ @return layout version]]>
+ </doc>
+ </method>
+ <method name="getUpgradeStatus" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the upgradeStatus value as a percentage of the total upgrade done.
+
+ @see Upgradeable#getUpgradeStatus()]]>
+ </doc>
+ </method>
+ <method name="isFinalized" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the current upgrade finalized?
+ @return true if finalized, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getStatusText" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="details" type="boolean"/>
+ <doc>
+ <![CDATA[Get upgradeStatus data as text for reporting.
+ Should be overridden to supply upgrade-specific upgradeStatus data.
+
+ @param details true if upgradeStatus details need to be included,
+ false otherwise
+ @return text]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print basic upgradeStatus details.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="version" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="upgradeStatus" type="short"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="finalized" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Base upgradeStatus reporting class.
+ Subclass it if specific status fields need to be reported.
+
+ Describes status of current upgrade.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.UpgradeStatusReport -->
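+ <!-- Example (a minimal sketch): per the class doc, subclass the report when
+ upgrade-specific status fields are needed and override getStatusText(boolean);
+ the subclass and its field are hypothetical (a real one would also extend
+ write/readFields for the new field):
+
+   class ExampleStatusReport extends UpgradeStatusReport {
+     private long blocksConverted;    // upgrade-specific field
+     ExampleStatusReport(int version, short status, boolean finalized) {
+       super(version, status, finalized);   // constructor shown above
+     }
+     public String getStatusText(boolean details) {
+       String text = super.getStatusText(details);
+       return details ? text + "\n  Blocks converted: " + blocksConverted : text;
+     }
+   }
+ -->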
+</package>
+<package name="org.apache.hadoop.dfs.datanode.metrics">
+ <!-- start class org.apache.hadoop.dfs.datanode.metrics.DataNodeMetrics -->
+ <class name="DataNodeMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="DataNodeMetrics" type="org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="bytesWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bytesRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksReplicated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksRemoved" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blocksVerified" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockVerificationFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readsFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readsFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writesFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writesFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="writeBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="readMetadataOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="copyBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="replaceBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="heartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="blockReports" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various DataNode statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #blocksRead}.inc()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.datanode.metrics.DataNodeMetrics -->
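+ <!-- Example (a minimal sketch): same pattern as the name node metrics; the class
+ doc's own example is blocksRead.inc(). "metrics" and "n" are assumed:
+
+   metrics.blocksRead.inc();     // one more block served to a reader
+   metrics.bytesRead.inc(n);     // n more bytes streamed in this interval
+ -->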
+ <!-- start class org.apache.hadoop.dfs.datanode.metrics.DataNodeStatistics -->
+ <class name="DataNodeStatistics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean"/>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shuts down the statistics and unregisters the MBean.]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlocksRead" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlocksRemoved" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlocksReplicated" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlocksWritten" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockVerificationFailures" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlocksVerified" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadsFromLocalClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadsFromRemoteClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWritesFromLocalClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWritesFromRemoteClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@inheritDoc]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.datanode.metrics.DataNodeStatistics -->
+ <!-- start interface org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean -->
+ <interface name="DataNodeStatisticsMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getBytesRead" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of bytes read in the last interval
+ @return number of bytes read]]>
+ </doc>
+ </method>
+ <method name="getBlocksWritten" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of blocks written in the last interval
+ @return number of blocks written]]>
+ </doc>
+ </method>
+ <method name="getBlocksRead" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of blocks read in the last interval
+ @return number of blocks read]]>
+ </doc>
+ </method>
+ <method name="getBlocksReplicated" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of blocks replicated in the last interval
+ @return number of blocks replicated]]>
+ </doc>
+ </method>
+ <method name="getBlocksRemoved" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of blocks removed in the last interval
+ @return number of blocks removed]]>
+ </doc>
+ </method>
+ <method name="getBlocksVerified" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of blocks verified in the last interval
+ @return number of blocks verified]]>
+ </doc>
+ </method>
+ <method name="getBlockVerificationFailures" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of block verification failures in the last interval
+ @return number of block verification failures]]>
+ </doc>
+ </method>
+ <method name="getReadsFromLocalClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of reads from local clients in the last interval
+ @return number of reads from local clients]]>
+ </doc>
+ </method>
+ <method name="getReadsFromRemoteClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of reads from remote clients in the last interval
+ @return number of reads from remote clients]]>
+ </doc>
+ </method>
+ <method name="getWritesFromLocalClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of writes from local clients in the last interval
+ @return number of writes from local clients]]>
+ </doc>
+ </method>
+ <method name="getWritesFromRemoteClient" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of writes from remote clients in the last interval
+ @return number of writes from remote clients]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of ReadBlock Operation in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for ReadBlock Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum ReadBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReadBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum ReadBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of WriteBlock Operation in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for WriteBlock Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum WriteBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getWriteBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum WriteBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of ReadMetadata Operation in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for ReadMetadata Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum ReadMetadata Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReadMetadataOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum ReadMetadata Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of CopyBlock Operation in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for CopyBlock Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum CopyBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getCopyBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum CopyBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of ReplaceBlock Operation in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for ReplaceBlock Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum ReplaceBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getReplaceBlockOpMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum ReplaceBlock Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Block Reports sent in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for Block Reports Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum Block Reports Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportsMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum Block Reports Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Heartbeat Operation in last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for Heartbeat Operation in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum Heartbeat Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getHeartbeatsMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum Heartbeat Operation Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all min max times]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX interface for the runtime statistics for the data node.
+ Many of the statistics are sampled and averaged on an interval
+ which can be specified in the config file.
+ <p>
+ For the statistics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most do.
+ The default Null metrics context, however, does NOT. So if you are not
+ using any other metrics context, you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ dfs.period=10
+ </pre>
+ <p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically.
+ <p>
+ Name Node Status info is reported in another MBean
+ @see org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean -->
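+ <!-- Example (a minimal sketch using the standard javax.management API): once the
+ data node has registered this MBean, its attributes can be read from any
+ MBeanServer. The ObjectName string is an assumption; the actual name depends
+ on how the bean was registered:
+
+   MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+   ObjectName name =
+       new ObjectName("hadoop.dfs:service=DataNode,name=DataNodeStatistics");
+   int blocksRead = (Integer) mbs.getAttribute(name, "BlocksRead");
+   long avgRead = (Long) mbs.getAttribute(name, "ReadBlockOpAverageTime");
+ -->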
+ <!-- start interface org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean -->
+ <interface name="FSDatasetMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getDfsUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the total space (in bytes) used by the dfs datanode
+ @return the total space used by the dfs datanode
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns total capacity (in bytes) of storage (used and unused)
+ @return total capacity of storage (used and unused)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the amount of free storage space (in bytes)
+ @return The amount of free storage space
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getStorageInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the storage id of the underlying storage]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface defines the methods to get the status of the FSDataset of
+ a data node.
+ It is also used for publishing via JMX (hence we follow the JMX naming
+ convention).
+ <p>
+ Data node runtime statistic info is reported in another MBean.
+ @see org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean -->
+</package>
+<package name="org.apache.hadoop.dfs.namenode.metrics">
+ <!-- start interface org.apache.hadoop.dfs.namenode.metrics.FSNamesystemMBean -->
+ <interface name="FSNamesystemMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getFSState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The state of the file system: Safemode or Operational
+ @return the state]]>
+ </doc>
+ </method>
+ <method name="getBlocksTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of allocated blocks in the system
+ @return - number of allocated blocks]]>
+ </doc>
+ </method>
+ <method name="getCapacityTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total storage capacity
+ @return - total capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacityRemaining" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Free (unused) storage capacity
+ @return - free capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacityUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Used storage capacity
+ @return - used capacity in bytes]]>
+ </doc>
+ </method>
+ <method name="getFilesTotal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total number of files and directories
+ @return - num of files and directories]]>
+ </doc>
+ </method>
+ <method name="getPendingReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks pending to be replicated
+ @return - num of blocks to be replicated]]>
+ </doc>
+ </method>
+ <method name="getUnderReplicatedBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks under replicated
+ @return - num of blocks under replicated]]>
+ </doc>
+ </method>
+ <method name="getScheduledReplicationBlocks" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Blocks scheduled for replication
+ @return - num of blocks scheduled for replication]]>
+ </doc>
+ </method>
+ <method name="getTotalLoad" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Total Load on the FSNamesystem
+ @return - total load of FSNamesystem]]>
+ </doc>
+ </method>
+ <method name="numLiveDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Live data nodes
+ @return number of live data nodes]]>
+ </doc>
+ </method>
+ <method name="numDeadDataNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of dead data nodes
+ @return number of dead data nodes]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface defines the methods to get the status of the FSNamesystem of
+ a name node.
+ It is also used for publishing via JMX (hence we follow the JMX naming
+ convention).
+
+ <p>
+ Name node runtime statistic info is reported in another MBean.
+ @see org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.namenode.metrics.FSNamesystemMBean -->
+ <!-- start class org.apache.hadoop.dfs.namenode.metrics.NameNodeStatistics -->
+ <class name="NameNodeStatistics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean"/>
+ <constructor name="NameNodeStatistics" type="org.apache.hadoop.dfs.NameNodeMetrics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constructs and registers the NameNodeStatisticsMBean
+ @param nameNodeMetrics - the metrics from which the mbean gets its info]]>
+ </doc>
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shuts down the statistics
+ - unregisters the mbean]]>
+ </doc>
+ </method>
+ <method name="getBlockReportAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getBlockReportMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getBlockReportMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getBlockReportNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getSafemodeTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getFSImageLoadTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getNumFilesCreated" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getNumFilesListed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call getNumGetListingOps() instead">
+ <doc>
+ <![CDATA[@deprecated call getNumGetListingOps() instead]]>
+ </doc>
+ </method>
+ <method name="getNumGetListingOps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getNumCreateFileOps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getNumDeleteFileOps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getNumAddBlockOps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getNumGetBlockLocations" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getNumFilesRenamed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the implementation of the Name Node JMX MBean]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.dfs.namenode.metrics.NameNodeStatistics -->
+ <!-- start interface org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean -->
+ <interface name="NameNodeStatisticsMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getSafemodeTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The time spent in the Safemode at startup
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getFSImageLoadTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Time spent loading the FS Image at startup
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Journal Transactions in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for Journal transactions in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum Journal Transaction Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalTransactionMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum Journal Transaction Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of block Reports processed in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getBlockReportAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for Block Report Processing in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum Block Report Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getBlockReportMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum Block Report Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncNum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of Journal Syncs in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncAverageTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for Journal Sync in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncMinTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum Journal Sync Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getJournalSyncMaxTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum Journal Sync Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all min max times]]>
+ </doc>
+ </method>
+ <method name="getNumFilesCreated" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of files created in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getNumGetBlockLocations" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of
+ {@link org.apache.hadoop.dfs.NameNode#getBlockLocations(String,long,long)} operations
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getNumFilesRenamed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of files renamed in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getNumFilesListed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getNumGetListingOps() instead">
+ <doc>
+ <![CDATA[Number of files listed in the last interval
+ @return number of operations
+ @deprecated Use getNumGetListingOps() instead]]>
+ </doc>
+ </method>
+ <method name="getNumGetListingOps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of files listed in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getNumCreateFileOps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of file creation operations in the last interval
+ @return number of file creation operations]]>
+ </doc>
+ </method>
+ <method name="getNumDeleteFileOps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of file deletion operations in the last interval
+ @return number of file deletion operations]]>
+ </doc>
+ </method>
+ <method name="getNumAddBlockOps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of add block operations in the last interval
+ @return number of add block operations]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for getting runtime statistics of
+ the name node.
+ Many of the statistics are sampled and averaged on an interval
+ which can be specified in the config file.
+ <p>
+ For the statistics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most do.
+ The default Null metrics context, however, does NOT. So if you aren't
+ using any other metrics context, you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ dfs.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically.
+ <p>
+ Name Node status info is reported in another MBean.
+ @see org.apache.hadoop.dfs.namenode.metrics.FSNamesystemMBean]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean -->
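+ <!-- Illustrative only: reading the sampled statistics above, reusing the
+      connection sketch shown after FSNamesystemMBean. The ObjectName is
+      again an assumption; query the MBean server for the actual registration.
+
+      ObjectName stats = new ObjectName("hadoop:service=NameNode,name=NameNodeStatistics");
+      int syncs = (Integer) conn.getAttribute(stats, "JournalSyncNum");
+      long avgMsec = (Long) conn.getAttribute(stats, "JournalSyncAverageTime");
+      System.out.println(syncs + " journal syncs, avg " + avgMsec + " msec");
+
+      Note: with the NullContextWithUpdateThread configuration shown in the
+      javadoc above, the sampled values are re-averaged every dfs.period
+      seconds (10 s in the example); with the plain Null context they are
+      collected but never re-averaged for viewing.
+ -->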
+</package>
+<package name="org.apache.hadoop.filecache">
+ <!-- start class org.apache.hadoop.filecache.DistributedCache -->
+ <class name="DistributedCache" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedCache"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized; this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache dir where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred is
+ returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized; this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache dir where you want to localize the files/archives
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred
+ is returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="releaseCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is the opposite of getLocalCache. When you are done
+ using the cache, you need to release it.
+ @param cache The cache URI to be released
+ @param conf configuration which contains the filesystem the cache
+ is contained in.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeRelative" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTimestamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="cache" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns mtime of a given cache file on hdfs.
+ @param conf configuration
+ @param cache cache file
+ @return mtime of a given cache file on hdfs
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createAllSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="jobCacheDir" type="java.io.File"/>
+ <param name="workDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method creates symlinks for all files in a given dir in another directory
+ @param conf the configuration
+ @param jobCacheDir the directory containing the files to be linked to (the symlink targets)
+ @param workDir the directory in which the symlinks are created
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setCacheArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archives" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of archives
+ @param archives The list of archives that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="setCacheFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of files
+ @param files The list of files that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="getCacheArchives" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache archives set in the Configuration
+ @param conf The configuration which contains the archives
+ @return A URI array of the caches set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCacheFiles" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache files set in the Configuration
+ @param conf The configuration which contains the files
+ @return A URI array of the files set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheArchives" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized caches
+ @param conf Configuration that contains the localized archives
+ @return A path array of localized caches
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized files
+ @param conf Configuration that contains the localized files
+ @return A path array of localized files
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getArchiveTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the archives
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFileTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the files
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setArchiveTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamp of the archives to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of archives.
+ The order should be the same as the order in which the archives are added.]]>
+ </doc>
+ </method>
+ <method name="setFileTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamp of the files to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of files.
+ The order should be the same as the order in which the files are added.]]>
+ </doc>
+ </method>
+ <method name="setLocalArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized archives
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+ </doc>
+ </method>
+ <method name="setLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized files
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+ </doc>
+ </method>
+ <method name="addCacheArchive"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add an archive to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addCacheFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add a file to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addFileToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a file path to the current set of classpath entries. It adds the file
+ to the cache as well.
+
+ @param file Path of the file to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getFileClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the file entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="addArchiveToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archive" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an archive path to the current set of classpath entries. It adds the
+ archive to cache as well.
+
+ @param archive Path of the archive to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getArchiveClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the archive entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method allows you to create symlinks in the current working directory
+ of the task to all the cache files/archives
+ @param conf the jobconf]]>
+ </doc>
+ </method>
+ <method name="getSymlink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method checks to see if symlinks are to be created for the
+ localized cache files in the current working directory
+ @param conf the jobconf
+ @return true if symlinks are to be created; else false]]>
+ </doc>
+ </method>
+ <method name="checkURIs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uriFiles" type="java.net.URI[]"/>
+ <param name="uriArchives" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[This method checks if there is a conflict in the fragment names
+ of the uris. Also makes sure that each uri has a fragment. It
+ is only to be called if you want to create symlinks for
+ the various archives and files.
+ @param uriFiles The uri array of the cache files
+ @param uriArchives the uri array of the cache archives]]>
+ </doc>
+ </method>
+ <method name="purgeCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clear the entire contents of the cache and delete the backing files. This
+ should only be used when the server is reinitializing, because the users
+ are going to lose their files.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Distribute application-specific large, read-only files efficiently.
+
+ <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ framework to cache files (text, archives, jars etc.) needed by applications.
+ </p>
+
+ <p>Applications specify the files, via urls (hdfs:// or http://) to be cached
+ via the {@link JobConf}. The <code>DistributedCache</code> assumes that the
+ files specified via hdfs:// urls are already present on the
+ {@link FileSystem} at the path specified by the url.</p>
+
+ <p>The framework will copy the necessary files on to the slave node before
+ any tasks for the job are executed on that node. Its efficiency stems from
+ the fact that the files are only copied once per job and the ability to
+ cache archives which are un-archived on the slaves.</p>
+
+ <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ data/text files and/or more complex types such as archives, jars etc.
+ Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes.
+ Jars may be optionally added to the classpath of the tasks, a rudimentary
+ software distribution mechanism. Files have execution permissions.
+ Optionally users can also direct it to symlink the distributed cache file(s)
+ into the working directory of the task.</p>
+
+ <p><code>DistributedCache</code> tracks modification timestamps of the cache
+ files. Clearly the cache files should not be modified by the application
+ or externally while the job is executing.</p>
+
+ <p>Here is an illustrative example on how to use the
+ <code>DistributedCache</code>:</p>
+ <p><blockquote><pre>
+ // Setting up the cache for the application
+
+ 1. Copy the requisite files to the <code>FileSystem</code>:
+
+ $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
+ $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
+ $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+ $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
+ $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
+ $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
+
+ 2. Setup the application's <code>JobConf</code>:
+
+ JobConf job = new JobConf();
+ DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
+ job);
+ DistributedCache.addCacheArchive(new URI("/myapp/map.zip", job);
+ DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar", job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz", job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz", job);
+
+ 3. Use the cached files in the {@link Mapper} or {@link Reducer}:
+
+ public static class MapClass extends MapReduceBase
+ implements Mapper&lt;K, V, K, V&gt; {
+
+ private Path[] localArchives;
+ private Path[] localFiles;
+
+ public void configure(JobConf job) {
+ // Get the cached archives/files
+ localArchives = DistributedCache.getLocalCacheArchives(job);
+ localFiles = DistributedCache.getLocalCacheFiles(job);
+ }
+
+ public void map(K key, V value,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Use data from the cached archives/files here
+ // ...
+ // ...
+ output.collect(k, v);
+ }
+ }
+
+ </pre></blockquote></p>
+
+ @see JobConf
+ @see JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.filecache.DistributedCache -->
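+ <!-- Illustrative only: the symlink flow described in the class javadoc
+      above, using the same fragment style as its example and assuming the
+      imports shown there (plus java.net.URI). The URI fragment (#lookup.dat)
+      names the symlink that appears in the task's working directory.
+
+      JobConf job = new JobConf();
+      DistributedCache.createSymlink(job); // ask for symlinks in the task cwd
+      // checkURIs requires every URI to carry a fragment when symlinking:
+      URI lookup = new URI("/myapp/lookup.dat#lookup.dat");
+      DistributedCache.addCacheFile(lookup, job);
+      // Inside a task, the cached file is then reachable as ./lookup.dat
+ -->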
+</package>
+<package name="org.apache.hadoop.fs">
+ <!-- start class org.apache.hadoop.fs.BlockLocation -->
+ <class name="BlockLocation" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlockLocation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with host, name, offset and length]]>
+ </doc>
+ </constructor>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hosts (hostname) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of names (hostname:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start offset of the file associated with this block]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the length of the block]]>
+ </doc>
+ </method>
+ <method name="setOffset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <doc>
+ <![CDATA[Set the start offset of the file associated with this block]]>
+ </doc>
+ </method>
+ <method name="setLength"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="length" type="long"/>
+ <doc>
+ <![CDATA[Set the length of the block]]>
+ </doc>
+ </method>
+ <method name="setHosts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hosts" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the hosts hosting this block]]>
+ </doc>
+ </method>
+ <method name="setNames"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the names (host:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement write of Writable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement readFields of Writable]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BlockLocation -->
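+ <!-- Illustrative only: constructing a BlockLocation by hand. The array
+      order follows the constructor javadoc above (hosts, then names); the
+      javadoc is terse, so verify the order against the source. Host names
+      and the 64 MB length are made-up values.
+
+      String[] hosts = { "datanode1", "datanode2" };             // hostnames
+      String[] names = { "datanode1:50010", "datanode2:50010" }; // host:port
+      BlockLocation loc = new BlockLocation(hosts, names, 0L, 64L * 1024 * 1024);
+      // loc.getOffset() == 0, loc.getLength() == 67108864
+ -->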
+ <!-- start class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <class name="BufferedFSInputStream" extends="java.io.BufferedInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="BufferedFSInputStream" type="org.apache.hadoop.fs.FSInputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a <code>BufferedFSInputStream</code>
+ with the specified buffer size,
+ and saves its argument, the input stream
+ <code>in</code>, for later use. An internal
+ buffer array of length <code>size</code>
+ is created and stored in <code>buf</code>.
+
+ @param in the underlying input stream.
+ @param size the buffer size.
+ @exception IllegalArgumentException if size <= 0.]]>
+ </doc>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A class that optimizes reading from FSInputStream by buffering]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <!-- start class org.apache.hadoop.fs.ChecksumException -->
+ <class name="ChecksumException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumException" type="java.lang.String, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Thrown for checksum errors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumException -->
+ <!-- start class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <class name="ChecksumFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getApproxChkSumLength" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getRawFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the raw file system]]>
+ </doc>
+ </method>
+ <method name="getChecksumFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return the name of the checksum file associated with a file.]]>
+ </doc>
+ </method>
+ <method name="isChecksumFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return true iff file is a checksum file name.]]>
+ </doc>
+ </method>
+ <method name="getChecksumFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileSize" type="long"/>
+ <doc>
+ <![CDATA[Return the length of the checksum file given the size of the
+ actual file.]]>
+ </doc>
+ </method>
+ <method name="getBytesPerSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes per checksum]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="getChecksumLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ <param name="bytesPerSum" type="int"/>
+ <doc>
+ <![CDATA[Calculate the length of the checksum file in bytes.
+ @param size the length of the data file in bytes
+ @param bytesPerSum the number of bytes in a checksum block
+ @return the number of bytes in the checksum file]]>
+ </doc>
+ </method>
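+ <!-- For intuition only, not taken from this file: if each bytesPerSum-byte
+      chunk of data contributes one fixed-size checksum entry plus a small
+      header, the length above is a ceiling division. The header and checksum
+      sizes below are hypothetical placeholders, not the implementation's
+      constants.
+
+      static long checksumLength(long size, int bytesPerSum,
+                                 long headerSize, int checksumSize) {
+        long chunks = (size + bytesPerSum - 1) / bytesPerSum; // ceil(size / bytesPerSum)
+        return headerSize + chunks * checksumSize;
+      }
+ -->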
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename files/dirs]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement the delete(Path, boolean) in checksum
+ file system.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="copyCrc" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ If src and dst are directories, the copyCrc parameter
+ determines whether to copy CRC files.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Report a checksum error to the file system.
+ @param f the file name containing the error
+ @param in the stream open on the file
+ @param inPos the position of the beginning of the bad data in the file
+ @param sums the stream open on the checksum file
+ @param sumsPos the position of the beginning of the bad data in the checksum file
+ @return whether retry is necessary]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract checksummed FileSystem.
+ It provides a basic implementation of a checksummed FileSystem,
+ which creates a checksum file for each raw file.
+ It generates & verifies checksums at the client side.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <!-- start class org.apache.hadoop.fs.ContentSummary -->
+ <class name="ContentSummary" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ContentSummary"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the length]]>
+ </doc>
+ </method>
+ <method name="getDirectoryCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the directory count]]>
+ </doc>
+ </method>
+ <method name="getFileCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the file count]]>
+ </doc>
+ </method>
+ <method name="getQuota" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the directory quota]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the header of the output.
+ If qOption is false, output the directory count, file count, and content size;
+ if qOption is true, output the quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the header of the output]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the string representation of the object in the output format.
+ If qOption is false, output the directory count, file count, and content size;
+ if qOption is true, output the quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the string representation of the object]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store the summary of a content (a directory or a file).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ContentSummary -->
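+ <!-- A minimal, hypothetical usage sketch for ContentSummary; the path and
+      variable names are illustrative. getContentSummary is documented on
+      FileSystem below:
+
+        Configuration conf = new Configuration();
+        FileSystem fs = FileSystem.get(conf);
+        ContentSummary cs = fs.getContentSummary(new Path("/user/demo"));
+        System.out.println("files=" + cs.getFileCount()
+            + " dirs=" + cs.getDirectoryCount()
+            + " bytes=" + cs.getLength());
+ -->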
+ <!-- start class org.apache.hadoop.fs.DF -->
+ <class name="DF" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DF" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="DF" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFilesystem" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAvailable" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPercentUsed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMount" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DF_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'df' program.
+ Tested on Linux, FreeBSD, Cygwin.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DF -->
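+ <!-- A minimal, hypothetical usage sketch for DF; the directory is
+      illustrative and must exist on a platform with a 'df' binary:
+
+        DF df = new DF(new java.io.File("/tmp"), new Configuration());
+        System.out.println(df.getMount()
+            + " capacity=" + df.getCapacity()
+            + " used=" + df.getUsed()
+            + " available=" + df.getAvailable()
+            + " (" + df.getPercentUsed() + "% used)");
+ -->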
+ <!-- start class org.apache.hadoop.fs.DU -->
+ <class name="DU" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DU" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param interval refresh the disk usage at this interval
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <constructor name="DU" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param conf configuration object
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <method name="decDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Decrease how much disk space we use.
+ @param value decrease by this value]]>
+ </doc>
+ </method>
+ <method name="incDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Increase how much disk space we use.
+ @param value increase by this value]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return disk space used
+ @throws IOException if the shell command fails]]>
+ </doc>
+ </method>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the path whose disk usage is being tracked]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Start the disk usage checking thread.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down the refreshing thread.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'du' program.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DU -->
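+ <!-- A minimal, hypothetical usage sketch for DU; the directory and the
+      10-minute refresh interval are illustrative:
+
+        DU du = new DU(new java.io.File("/tmp"), 600000L);
+        du.start();                    // background refresh thread
+        long used = du.getUsed();      // bytes, per the latest 'du' run
+        du.shutdown();
+ -->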
+ <!-- start class org.apache.hadoop.fs.FileStatus -->
+ <class name="FileStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <constructor name="FileStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a directory?
+ @return true if this is a directory]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the block size of the file.
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replication factor of a file.
+ @return the replication factor of a file.]]>
+ </doc>
+ </method>
+ <method name="getModificationTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the modification time of the file.
+ @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get FsPermission associated with the file.
+ @return permission. If a filesystem does not have a notion of permissions,
+ or if permissions could not be determined, then the default
+ permission equivalent of "rwxrwxrwx" is returned.]]>
+ </doc>
+ </method>
+ <method name="getOwner" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the owner of the file.
+ @return owner of the file. The string could be empty if there is no
+ notion of owner of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getGroup" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the group associated with the file.
+ @return group for the file. The string could be empty if there is no
+ notion of group of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Sets permission.
+ @param permission if permission is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="owner" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets owner.
+ @param owner if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setGroup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets group.
+ @param group if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare this object to another object
+
+ @param o the object to be compared.
+ @return a negative integer, zero, or a positive integer as this object
+ is less than, equal to, or greater than the specified object.
+
+ @throws ClassCastException if the specified object is not of
+ type FileStatus]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Check whether this object is equal to another object.
+ @param o the object to be compared.
+ @return true if the two file statuses have the same path name; false if not.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for the object, which is defined as
+ the hash code of the path name.
+
+ @return a hash code value for the path name.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents the client-side information for a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileStatus -->
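+ <!-- A minimal, hypothetical usage sketch for FileStatus, obtained via
+      FileSystem.getFileStatus (documented below); the path is illustrative:
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        FileStatus st = fs.getFileStatus(new Path("/user/demo/file.txt"));
+        if (!st.isDir()) {
+          System.out.println(st.getPath() + ": " + st.getLen()
+              + " bytes, replication " + st.getReplication()
+              + ", modified " + st.getModificationTime());
+        }
+ -->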
+ <!-- start class org.apache.hadoop.fs.FileSystem -->
+ <class name="FileSystem" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="FileSystem"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseArgs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="i" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the cmd-line args, starting at i. Remove consumed args
+ from array. We expect param in the form:
+ '-local | -dfs <namenode:port>']]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the configured filesystem implementation.]]>
+ </doc>
+ </method>
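+ <!-- A minimal, hypothetical sketch of obtaining the configured filesystem;
+      in this release the default URI typically comes from the
+      fs.default.name property:
+
+        Configuration conf = new Configuration();
+        FileSystem fs = FileSystem.get(conf);
+        System.out.println("default fs: " + FileSystem.getDefaultUri(conf));
+ -->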
+ <method name="getDefaultUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default filesystem URI from a configuration.
+ @param conf the configuration to access
+ @return the uri of the default filesystem]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.net.URI"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="getNamed" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="call #get(URI,Configuration) instead.">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated call #get(URI,Configuration) instead.]]>
+ </doc>
+ </method>
+ <method name="getLocal" return="org.apache.hadoop.fs.LocalFileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the local file system.
+ @param conf the configuration to configure the file system with
+ @return a LocalFileSystem]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the FileSystem for this URI's scheme and authority. The scheme
+ of the URI determines a configuration property name,
+ <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
+ The entire URI is passed to the FileSystem instance's initialize method.]]>
+ </doc>
+ </method>
+ <method name="closeAll"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all cached filesystems. Be sure those filesystems are not
+ used anymore.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a file with the provided permission.
+ The permission of the file is set to the provided permission, as in
+ setPermission, not permission&~umask.
+
+ It is implemented using two RPCs. It is understood that it is inefficient,
+ but the implementation is thread-safe. The other option is to change the
+ value of umask in configuration to be 0, but it is not thread-safe.
+
+ @param fs file system handle
+ @param file the name of the file to be created
+ @param permission the permission of the file
+ @return an output stream
+ @throws IOException]]>
+ </doc>
+ </method>
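+ <!-- A minimal, hypothetical sketch of creating a file with an explicit
+      permission via the static helper above; the path and mode are
+      illustrative:
+
+        FsPermission perm = new FsPermission((short) 0644);
+        FSDataOutputStream out =
+            FileSystem.create(fs, new Path("/user/demo/out.txt"), perm);
+        out.writeBytes("data");
+        out.close();
+ -->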
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a directory with the provided permission.
+ The permission of the directory is set to the provided permission, as in
+ setPermission, not permission&~umask.
+
+ @see #create(FileSystem, Path, FsPermission)
+
+ @param fs file system handle
+ @param dir the name of the directory to be created
+ @param permission the permission of the directory
+ @return true if the directory creation succeeds; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getFileBlockLocations(FileStatus, long, long)}">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing the hostnames, offsets, and sizes of
+ portions of the given file. For a nonexistent
+ file or region, null will be returned.
+
+ This call is most helpful with DFS, where it returns the
+ hostnames of machines that contain the given file.
+
+ The base FileSystem will simply return an element containing 'localhost'.
+ @deprecated use {@link #getFileBlockLocations(FileStatus, long, long)}]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing the hostnames, offsets, and sizes of
+ portions of the given file. For a nonexistent
+ file or region, null will be returned.
+
+ This call is most helpful with DFS, where it returns the
+ hostnames of machines that contain the given file.
+
+ The base FileSystem will simply return an element containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file to open]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param permission
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize
+ @param progress
+ @throws IOException
+ @see #setPermission(Path, FsPermission)]]>
+ </doc>
+ </method>
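+ <!-- A minimal, hypothetical sketch of the fully parameterized create
+      (via the non-permission overload documented above); all values are
+      illustrative:
+
+        FSDataOutputStream out = fs.create(
+            new Path("/user/demo/big.dat"),
+            true,               // overwrite
+            4096,               // buffer size
+            (short) 3,          // replication
+            64L * 1024 * 1024,  // block size, 64 MB
+            null);              // no Progressable
+        out.close();
+ -->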
+ <method name="createNewFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the given Path as a brand-new zero-length file. Returns
+ false if creation fails or if the file already exists.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, getConf().getInt("io.file.buffer.size", 4096), null)
+ @param f the existing file to be appended.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, bufferSize, null).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @param progress for reporting progress if it is not null.
+ @throws IOException]]>
+ </doc>
+ </method>
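+ <!-- A minimal, hypothetical sketch of append; it is an optional operation,
+      so implementations that do not support it may throw IOException:
+
+        FSDataOutputStream out = fs.append(new Path("/user/demo/log.txt"));
+        out.writeBytes("another line\n");
+        out.close();
+ -->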
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get replication.
+
+ @deprecated Use getFileStatus() instead
+ @param src file name
+ @return file replication
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file.
+
+ @param f the path to delete.
+ @param recursive if the path is a directory and recursive is set to
+ true, the directory is deleted; otherwise an exception is thrown. In
+ the case of a file, recursive may be set to either true or false.
+ @return true if the delete is successful; false otherwise.
+ @throws IOException]]>
+ </doc>
+ </method>
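+ <!-- A minimal, hypothetical sketch of delete; paths are illustrative:
+
+        fs.delete(new Path("/user/demo/tmpdir"), true);    // recursive
+        fs.delete(new Path("/user/demo/file.txt"), false); // single file
+ -->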
+ <method name="deleteOnExit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a path to be deleted when the FileSystem is closed.
+ When the JVM shuts down, all FileSystem objects will be closed
+ automatically, and the marked path will then be deleted as a result
+ of closing the FileSystem.
+
+ The path has to exist in the file system.
+
+ @param f the path to delete.
+ @return true if deleteOnExit is successful, otherwise false.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="processDeleteOnExit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete all files that were marked as delete-on-exit. This recursively
+ deletes all files in the specified paths.]]>
+ </doc>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check whether the given path exists.
+ @param f source file]]>
+ </doc>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[True iff the named path is a regular file.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the {@link ContentSummary} of a given {@link Path}.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given path using the user-supplied path
+ filter.
+
+ @param f
+ a path name
+ @param filter
+ the user-supplied path filter
+ @return an array of FileStatus objects for the files under the given path
+ after applying the filter
+ @throws IOException
+ if encounter any problem while fetching the status]]>
+ </doc>
+ </method>
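+ <!-- A minimal, hypothetical sketch of listStatus with a user-supplied
+      PathFilter; the filter and path are illustrative:
+
+        PathFilter noCrc = new PathFilter() {
+          public boolean accept(Path path) {
+            return !path.getName().endsWith(".crc");
+          }
+        };
+        for (FileStatus s : fs.listStatus(new Path("/user/demo"), noCrc)) {
+          System.out.println(s.getPath());
+        }
+ -->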
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using default
+ path filter.
+
+ @param files
+ a list of paths
+ @return a list of statuses for the files under the given paths after
+ applying the default Path filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using user-supplied
+ path filter.
+
+ @param files
+ a list of paths
+ @param filter
+ the user-supplied path filter
+ @return a list of statuses for the files under the given paths after
+ applying the filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Return all the files that match filePattern and are not checksum
+ files. Results are sorted by their names.
+
+ <p>
+ A filename pattern is composed of <i>regular</i> characters and
+ <i>special pattern matching</i> characters, which are:
+
+ <dl>
+ <dd>
+ <dl>
+ <p>
+ <dt> <tt> ? </tt>
+ <dd> Matches any single character.
+
+ <p>
+ <dt> <tt> * </tt>
+ <dd> Matches zero or more characters.
+
+ <p>
+ <dt> <tt> [<i>abc</i>] </tt>
+ <dd> Matches a single character from character set
+ <tt>{<i>a,b,c</i>}</tt>.
+
+ <p>
+ <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
+ <dd> Matches a single character from the character range
+ <tt>{<i>a...b</i>}</tt>. Note that character <tt><i>a</i></tt> must be
+ lexicographically less than or equal to character <tt><i>b</i></tt>.
+
+ <p>
+ <dt> <tt> [^<i>a</i>] </tt>
+ <dd> Matches a single character that is not from character set or range
+ <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur
+ immediately to the right of the opening bracket.
+
+ <p>
+ <dt> <tt> \<i>c</i> </tt>
+ <dd> Removes (escapes) any special meaning of character <i>c</i>.
+
+ <p>
+ <dt> <tt> {ab,cd} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt>
+
+ <p>
+ <dt> <tt> {ab,c{de,fh}} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt>
+
+ </dl>
+ </dd>
+ </dl>
+
+ @param pathPattern a regular expression specifying a path pattern
+
+ @return an array of paths that match the path pattern
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array of FileStatus objects whose path names match pathPattern
+ and are accepted by the user-supplied path filter. Results are sorted by
+ their path names.
+ Return null if pathPattern has no glob and the path does not exist.
+ Return an empty array if pathPattern has a glob and no path matches it.
+
+ @param pathPattern
+ a regular expression specifying the path pattern
+ @param filter
+ a user-supplied path filter
+ @return an array of FileStatus objects
+ @throws IOException if any I/O error occurs when fetching file status]]>
+ </doc>
+ </method>
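+ <!-- A minimal, hypothetical sketch of globStatus; the layout matched by
+      the pattern is illustrative, and the null check covers the
+      no-glob/nonexistent-path case described above:
+
+        FileStatus[] matches = fs.globStatus(new Path("/logs/2010/*/part*"));
+        if (matches != null) {
+          for (FileStatus m : matches) {
+            System.out.println(m.getPath());
+          }
+        }
+ -->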
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the current user's home directory in this filesystem.
+ The default implementation returns "/user/$USER/".]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param new_dir]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system.
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #mkdirs(Path, FsPermission)} with default permission.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make the given file and all non-existent parents into
+ directories. Has the semantics of Unix 'mkdir -p'.
+ Existence of the directory hierarchy is not an error.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name; the source is kept intact afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add them to the FS at
+ the given dst name, removing the sources afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name, removing the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add them to the FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="moveToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ Remove the source afterwards]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
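+ <!-- A minimal, hypothetical sketch of the local copy helpers; paths are
+      illustrative:
+
+        // local disk to FS, keeping the source, overwriting the target
+        fs.copyFromLocalFile(false, true,
+            new Path("/tmp/local.txt"), new Path("/user/demo/remote.txt"));
+        // FS back to local disk, keeping the source
+        fs.copyToLocalFile(false,
+            new Path("/user/demo/remote.txt"), new Path("/tmp/copy.txt"));
+ -->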
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
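+ <!-- A hedged sketch of the two-phase local-output protocol described above:
+      write into the Path returned by startLocalOutput, then let
+      completeLocalOutput promote it to the FS target. Paths are hypothetical.
+
+        Path target = new Path("/user/data/out.bin");
+        Path tmp = new Path("/tmp/out.bin.tmp");
+        Path local = fs.startLocalOutput(target, tmp);
+        // write to the local file via java.io, then:
+        fs.completeLocalOutput(target, tmp);
+ -->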
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[No more filesystem operations are needed. Will
+ release any held locks.]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total size of all files in the filesystem.]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should optimally
+ be split into to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a file status object that represents the path.
+ @param f The path we want information from
+ @return a FileStatus object
+ @throws FileNotFoundException when the path does not exist;
+ IOException see specific implementation]]>
+ </doc>
+ </method>
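+ <!-- A small Java sketch, assuming fs is an initialized FileSystem and the
+      path is hypothetical:
+
+        FileStatus st = fs.getFileStatus(new Path("/user/data/report.txt"));
+        long size = st.getLen();          // file length in bytes
+        boolean isDir = st.isDir();       // true for directories
+        short repl = st.getReplication(); // replication factor
+ -->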
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permission of a path.
+ @param p the path
+ @param permission the permission to set]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param p The path
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.]]>
+ </doc>
+ </method>
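+ <!-- A sketch of the null semantics described above (path and user name are
+      hypothetical):
+
+        // change the owner only; the group stays unchanged because it is null
+        fs.setOwner(new Path("/user/data"), "alice", null);
+ -->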
+ <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.FileSystem&gt;"/>
+ <doc>
+ <![CDATA[Get the statistics for a particular file system
+ @param cls the class to lookup
+ @return a statistics object]]>
+ </doc>
+ </method>
+ <method name="printStatistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="statistics" type="org.apache.hadoop.fs.FileSystem.Statistics"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The statistics for this file system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An abstract base class for a fairly generic filesystem. It
+ may be implemented as a distributed filesystem, or as a "local"
+ one that reflects the locally-connected disk. The local version
+ exists for small Hadoop instances and for testing.
+
+ <p>
+
+ All user code that may potentially use the Hadoop Distributed
+ File System should be written to use a FileSystem object. The
+ Hadoop DFS is a multi-machine system that appears as a single
+ disk. It's useful because of its fault tolerance and potentially
+ very large capacity.
+
+ <p>
+ The local implementation is {@link LocalFileSystem} and distributed
+ implementation is {@link DistributedFileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem -->
+ <!-- start class org.apache.hadoop.fs.FileSystem.Statistics -->
+ <class name="FileSystem.Statistics" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="incrementBytesRead"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes read in the statistics
+ @param newBytes the additional bytes read]]>
+ </doc>
+ </method>
+ <method name="incrementBytesWritten"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes written in the statistics
+ @param newBytes the additional bytes written]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes read
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes written
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem.Statistics -->
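+ <!-- A hedged sketch of reading these counters through
+      FileSystem.getStatistics; the class literal below assumes a
+      DistributedFileSystem is the active implementation:
+
+        FileSystem.Statistics stats = FileSystem.getStatistics(
+            org.apache.hadoop.hdfs.DistributedFileSystem.class);
+        System.out.println("read: " + stats.getBytesRead()
+            + " written: " + stats.getBytesWritten());
+ -->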
+ <!-- start class org.apache.hadoop.fs.FileUtil -->
+ <class name="FileUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus to an array of Path
+
+ @param stats
+ an array of FileStatus objects
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus to an array of Path.
+ If stats is null, return path
+ @param stats
+ an array of FileStatus objects
+ @param path
+ default path to return if stats is null
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="fullyDelete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a directory and all its contents. If
+ we return false, the directory may be partially deleted.]]>
+ </doc>
+ </method>
+ <method name="fullyDelete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recursively delete a directory.
+
+ @param fs {@link FileSystem} on which the path is present
+ @param dir directory to recursively delete
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
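+ <!-- Minimal sketch of a cross-FileSystem copy, assuming localFs, hdfs, and
+      conf are already initialized and the paths are hypothetical:
+
+        boolean ok = FileUtil.copy(localFs, new Path("/tmp/in.txt"),
+            hdfs, new Path("/user/data/in.txt"),
+            false /* keep the source */, conf);
+ -->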
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copyMerge" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dstFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="addString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy all files in a directory to one output file (merge).]]>
+ </doc>
+ </method>
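+ <!-- Hedged sketch: merge all files under a directory into one target file,
+      appending a newline after each input (file systems and paths are
+      hypothetical):
+
+        FileUtil.copyMerge(hdfs, new Path("/user/data/parts"),
+            hdfs, new Path("/user/data/merged.txt"),
+            false, conf, "\n");
+ -->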
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy local files to a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="java.io.File"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy FileSystem files to local files.]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param filename The filename to convert
+ @return The unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @return The unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="getDU" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Takes an input dir and returns the disk usage (du) of that local
+ directory. Very basic implementation.
+
+ @param dir
+ The input dir whose local disk space is measured
+ @return The total disk space of the input local directory]]>
+ </doc>
+ </method>
+ <method name="unZip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="unzipDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a zip File as input, it will unzip the file into the unzip
+ directory passed as the second parameter
+ @param inFile The zip file as input
+ @param unzipDir The directory into which to unzip the zip file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unTar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="untarDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a tar File as input, it will untar the file into the untar
+ directory passed as the second parameter
+
+ This utility will untar ".tar" files and ".tar.gz"/".tgz" files.
+
+ @param inFile The tar file as input.
+ @param untarDir The untar directory where to untar the tar file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="symLink" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="linkname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a soft link between a src and destination
+ only on a local disk. HDFS does not support this
+ @param target the target for symlink
+ @param linkname the symlink
+ @return value returned by the command]]>
+ </doc>
+ </method>
+ <method name="chmod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="perm" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Change the permissions on a filename.
+ @param filename the name of the file to change
+ @param perm the permission string
+ @return the exit code from the command
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="createLocalTempFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="basefile" type="java.io.File"/>
+ <param name="prefix" type="java.lang.String"/>
+ <param name="isDeleteOnExit" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a tmp file for a base file.
+ @param basefile the base file of the tmp
+ @param prefix file name prefix of tmp
+ @param isDeleteOnExit if true, the tmp will be deleted when the VM exits
+ @return a newly created tmp file
+ @exception IOException If a tmp file cannot be created
+ @see java.io.File#createTempFile(String, String, File)
+ @see java.io.File#deleteOnExit()]]>
+ </doc>
+ </method>
+ <method name="replaceFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="target" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move the src file to the name specified by target.
+ @param src the source file
+ @param target the target file
+ @exception IOException If this operation fails]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of file-processing util methods]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil -->
+ <!-- start class org.apache.hadoop.fs.FileUtil.HardLink -->
+ <class name="FileUtil.HardLink" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil.HardLink"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createHardLink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.io.File"/>
+ <param name="linkName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a hardlink]]>
+ </doc>
+ </method>
+ <method name="getLinkCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Retrieves the number of links to the specified file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Class for creating hardlinks.
+ Supports Unix, Cygwin, and Windows XP.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil.HardLink -->
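+ <!-- A short sketch of creating a hard link on the local disk and checking
+      its link count (files are hypothetical):
+
+        File target = new File("/tmp/data.bin");
+        File link = new File("/tmp/data.link");
+        FileUtil.HardLink.createHardLink(target, link);
+        int n = FileUtil.HardLink.getLinkCount(target); // 2 after linking
+ -->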
+ <!-- start class org.apache.hadoop.fs.FilterFileSystem -->
+ <class name="FilterFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FilterFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing hostnames, offset and size of
+ portions of the given file. For a nonexistent
+ file or regions, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The FileSystem will simply return an element containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List files in a directory.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param newDir the new working directory]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should optimally
+ be split into to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get file status.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A <code>FilterFileSystem</code> contains
+ some other file system, which it uses as
+ its basic file system, possibly transforming
+ the data along the way or providing additional
+ functionality. The class <code>FilterFileSystem</code>
+ itself simply overrides all methods of
+ <code>FileSystem</code> with versions that
+ pass all requests to the contained file
+ system. Subclasses of <code>FilterFileSystem</code>
+ may further override some of these methods
+ and may also provide additional methods
+ and fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FilterFileSystem -->
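+ <!-- A hedged sketch of the decorator pattern this class enables: a subclass
+      overrides only the calls it cares about and delegates the rest. The
+      logging subclass below is illustrative, not part of Hadoop:
+
+        public class LoggingFileSystem extends FilterFileSystem {
+          public LoggingFileSystem(FileSystem fs) { super(fs); }
+          public boolean delete(Path f, boolean recursive) throws IOException {
+            LOG.info("deleting " + f);      // LOG is inherited from FileSystem
+            return super.delete(f, recursive);
+          }
+        }
+ -->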
+ <!-- start class org.apache.hadoop.fs.FSDataInputStream -->
+ <class name="FSDataInputStream" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSDataInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="desired" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
+ and buffers input through a {@link BufferedInputStream}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataInputStream -->
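+ <!-- A minimal sketch of seekable and positioned reads, assuming an open
+      stream from FileSystem.open (path hypothetical):
+
+        FSDataInputStream in = fs.open(new Path("/user/data/in.bin"));
+        in.seek(1024);            // jump to byte offset 1024
+        byte[] buf = new byte[512];
+        in.readFully(4096, buf);  // positioned read; stream position unchanged
+        in.close();
+ -->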
+ <!-- start class org.apache.hadoop.fs.FSDataOutputStream -->
+ <class name="FSDataOutputStream" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Syncable"/>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWrappedStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps an {@link OutputStream} in a {@link DataOutputStream},
+ buffers output through a {@link BufferedOutputStream} and creates a checksum
+ file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataOutputStream -->
+ <!-- start class org.apache.hadoop.fs.FSError -->
+ <class name="FSError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Thrown for unexpected filesystem errors, presumed to reflect disk errors
+ in the native filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSError -->
+ <!-- start class org.apache.hadoop.fs.FSInputChecker -->
+ <class name="FSInputChecker" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs]]>
+ </doc>
+ </constructor>
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int, boolean, java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs
+ @param sum the type of Checksum engine
+ @param chunkSize maximum chunk size
+ @param checksumSize the number of bytes in each checksum]]>
+ </doc>
+ </constructor>
+ <method name="readChunk" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads in the next checksum chunk data into <code>buf</code> at <code>offset</code>
+ and checksum into <code>checksum</code>.
+ The method is used for implementing read; therefore, it should be optimized
+ for sequential reading
+ @param pos chunkPos
+ @param buf destination buffer
+ @param offset offset in buf at which to store data
+ @param len maximum number of bytes to read
+ @return number of bytes read]]>
+ </doc>
+ </method>
+ <method name="getChunkPosition" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <doc>
+ <![CDATA[Return position of beginning of chunk containing pos.
+
+ @param pos a position in the file
+ @return the starting position of the chunk which contains the byte]]>
+ </doc>
+ </method>
+ <method name="needChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if there is a need for checksum verification]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read one checksum-verified byte
+
+ @return the next byte of data, or <code>-1</code> if the end of the
+ stream is reached.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read checksum verified bytes from this byte-input stream into
+ the specified byte array, starting at the given offset.
+
+ <p> This method implements the general contract of the corresponding
+ <code>{@link InputStream#read(byte[], int, int) read}</code> method of
+ the <code>{@link InputStream}</code> class. As an additional
+ convenience, it attempts to read as many bytes as possible by repeatedly
+ invoking the <code>read</code> method of the underlying stream. This
+ iterated <code>read</code> continues until one of the following
+ conditions becomes true: <ul>
+
+ <li> The specified number of bytes have been read,
+
+ <li> The <code>read</code> method of the underlying stream returns
+ <code>-1</code>, indicating end-of-file.
+
+ </ul> If the first <code>read</code> on the underlying stream returns
+ <code>-1</code> to indicate end-of-file then this method returns
+ <code>-1</code>. Otherwise this method returns the number of bytes
+ actually read.
+
+ @param b destination buffer.
+ @param off offset at which to start storing bytes.
+ @param len maximum number of bytes to read.
+ @return the number of bytes read, or <code>-1</code> if the end of
+ the stream has been reached.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if any checksum error occurs]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over and discards <code>n</code> bytes of data from the
+ input stream.
+
+ <p>This method may skip more bytes than are remaining in the backing
+ file. This produces no exception and the number of bytes skipped
+ may include some number of bytes that were beyond the EOF of the
+ backing file. Attempting to read from the stream after skipping past
+ the end will result in -1 indicating the end of the file.
+
+<p>If <code>n</code> is negative, no bytes are skipped.
+
+ @param n the number of bytes to be skipped.
+ @return the actual number of bytes skipped.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to skip to is corrupted]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given position in the stream.
+ The next read() will be from that position.
+
+ <p>This method may seek past the end of the file.
+ This produces no exception and an attempt to read from
+ the stream will result in -1 indicating the end of the file.
+
+ @param pos the position to seek to.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to seek to is corrupted]]>
+ </doc>
+ </method>
+ <method name="readFully" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="stm" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A utility function that tries to read up to <code>len</code> bytes from
+ <code>stm</code>
+
+ @param stm an input stream
+ @param buf destination buffer
+ @param offset offset at which to store data
+ @param len number of bytes to read
+ @return actual number of bytes read
+ @throws IOException if there is any IO error]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="maxChunkSize" type="int"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Set the checksum related parameters
+ @param sum which type of checksum to use
+ @param maxChunkSize maximum chunk size
+ @param checksumSize checksum size]]>
+ </doc>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="readlimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="file" type="org.apache.hadoop.fs.Path"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file name from which data is read]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This is a generic input stream for verifying checksums for
+ data before it is read by a user.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputChecker -->
+ <!-- start class org.apache.hadoop.fs.FSInputStream -->
+ <class name="FSInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSInputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="seek"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks a different copy of the data. Returns true if
+ found a new source, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[FSInputStream is a generic InputStream with a little bit
+ of RandomAccessFile-style seek ability.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputStream -->
+ <!-- start class org.apache.hadoop.fs.FSOutputSummer -->
+ <class name="FSOutputSummer" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSOutputSummer" type="java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="writeChunk"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write one byte]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes <code>len</code> bytes from the specified byte array
+ starting at offset <code>off</code> and generates a checksum for
+ each data chunk.
+
+ <p> This method stores bytes from the given array into this
+ stream's buffer before it gets checksummed. The buffer gets checksummed
+ and flushed to the underlying output stream when all data
+ in a checksum chunk are in the buffer. If the buffer is empty and the
+ requested length is at least as large as the next checksum chunk
+ size, this method will checksum and write the chunk directly
+ to the underlying output stream. Thus it avoids unnecessary data copying.
+
+ @param b the data.
+ @param off the start offset in the data.
+ @param len the number of bytes to write.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This is a generic output stream for generating checksums for
+ data before it is written to the underlying stream]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSOutputSummer -->
+ <!-- start class org.apache.hadoop.fs.FsShell -->
+ <class name="FsShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="FsShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the Trash object associated with this shell.]]>
+ </doc>
+ </method>
+ <method name="byteDesc" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="long"/>
+ <doc>
+            <![CDATA[Return an abbreviated English-language description of the byte length]]>
+ </doc>
+ </method>
+ <method name="limitDecimalTo2" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+            <![CDATA[Run the command given in argv and return its exit code]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+            <![CDATA[Entry point; main() also has some simple utility methods]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dateForm" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="modifFmt" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provide command line access to a FileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsShell -->
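+          <!-- A hedged sketch, not part of the API dump: since FsShell implements
+               Tool, it can be driven programmatically via ToolRunner; the argument
+               values below are illustrative.
+
+                 import org.apache.hadoop.fs.FsShell;
+                 import org.apache.hadoop.util.ToolRunner;
+
+                 // Equivalent to running `hadoop fs -ls /` from the command line.
+                 int exitCode = ToolRunner.run(new FsShell(), new String[] {"-ls", "/"});
+          -->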
+ <!-- start class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
+ <class name="FsUrlStreamHandlerFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.net.URLStreamHandlerFactory"/>
+ <constructor name="FsUrlStreamHandlerFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsUrlStreamHandlerFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createURLStreamHandler" return="java.net.URLStreamHandler"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Factory for URL stream handlers.
+
+ There is only one handler whose job is to create UrlConnections. A
+ FsUrlConnection relies on FileSystem to choose the appropriate FS
+ implementation.
+
+ Before returning our handler, we make sure that FileSystem knows an
+ implementation for the requested scheme/protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
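+          <!-- A hedged sketch, not part of the API dump: the factory is installed
+               once per JVM so that java.net.URL understands FileSystem schemes;
+               the hdfs URI below is illustrative.
+
+                 import java.io.InputStream;
+                 import java.net.URL;
+                 import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
+
+                 // setURLStreamHandlerFactory may only be called once per JVM.
+                 URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
+                 InputStream in =
+                     new URL("hdfs://namenode:8020/user/alice/data.txt").openStream();
+          -->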
+ <!-- start class org.apache.hadoop.fs.HarFileSystem -->
+ <class name="HarFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HarFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+            <![CDATA[Public constructor for HarFileSystem]]>
+ </doc>
+ </constructor>
+ <constructor name="HarFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor to create a HarFileSystem with an
+ underlying filesystem.
+            @param fs the underlying filesystem]]>
+ </doc>
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Initialize a Har filesystem per har archive. The
+            archive home directory is the top-level directory
+            in the filesystem that contains the HAR archive.
+            Be careful with this method: you do not want to keep
+            creating new FileSystem instances with every call to
+            path.getFileSystem().
+            The URI of a Har filesystem is either
+            har://underlyingfsscheme-host:port/archivepath
+            or
+            har:///archivepath; the latter form assumes the default
+            underlying filesystem when none is specified.]]>
+ </doc>
+ </method>
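+          <!-- A hedged sketch, not part of the API dump, of the URI form described
+               above; the archive location is an illustrative assumption.
+
+                 import org.apache.hadoop.conf.Configuration;
+                 import org.apache.hadoop.fs.FSDataInputStream;
+                 import org.apache.hadoop.fs.FileSystem;
+                 import org.apache.hadoop.fs.Path;
+
+                 Configuration conf = new Configuration();
+                 Path p = new Path("har://hdfs-namenode:8020/user/alice/data.har/part-0");
+                 FileSystem harFs = p.getFileSystem(conf);  // resolves to HarFileSystem
+                 FSDataInputStream in = harFs.open(p, 4096);
+          -->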
+ <method name="getHarVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+            <![CDATA[Return the top-level archive.]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+            <![CDATA[Returns the URI of this filesystem.
+            The URI is of the form
+            har://underlyingfsscheme-host:port/pathintheunderlyingfs]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Get block locations from the underlying fs.
+            @param file the input filestatus to get block locations for
+            @param start the start in the file
+            @param len the length in the file
+            @return block locations for this segment of the file
+            @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getHarHash" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+            <![CDATA[The hash of the path p inside
+            the filesystem.
+            @param p the path in the har filesystem
+            @return the hash code of the path.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Return the filestatus of files in the har archive.
+            The permissions returned are those of the archive
+            index files; the permissions are not persisted
+            while creating a hadoop archive.
+ @param f the path in har filesystem
+ @return filestatus.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a har input stream which fakes end of
+ file. It reads the index files to get the part
+ file name and the size and start of the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[listStatus returns the children of a directory
+ after looking up the index files.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+            <![CDATA[Return the top-level archive path.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Copies the file in the har filesystem to a local file.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permisssion" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <field name="VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+            <![CDATA[This is an implementation of the Hadoop Archive
+            Filesystem. This archive Filesystem has index files
+            of the form _index* and contents of the form
+            part-*. The index files store the indexes of the
+            real files. The index files are of the form _masterindex
+            and _index. The master index is a level of indirection
+            into the index file to make lookups faster. The index
+            file is sorted by the hash code of the paths that it contains,
+            and the master index contains pointers to the positions in
+            the index for ranges of hash codes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.HarFileSystem -->
+ <!-- start class org.apache.hadoop.fs.InMemoryFileSystem -->
+ <class name="InMemoryFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InMemoryFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InMemoryFileSystem" type="java.net.URI, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reserveSpaceWithCheckSum" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="size" type="long"/>
+ <doc>
+            <![CDATA[Register a file with its size. This will also register a checksum for the
+            file that the user is trying to create. This is required since none of
+            the FileSystem APIs accept the size of the file as an argument, yet we
+            need to know the size of a file a priori before
+            creating it. The user must therefore call this method for each file they want
+            to create, reserving memory for that file. We either succeed in reserving memory
+            for both the main file and the checksum file and return true, or return
+            false.]]>
+ </doc>
+ </method>
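+          <!-- A hedged sketch, not part of the API dump: reserve space before
+               creating the file, per the contract above; the URI and size are
+               illustrative assumptions.
+
+                 import java.net.URI;
+                 import org.apache.hadoop.conf.Configuration;
+                 import org.apache.hadoop.fs.InMemoryFileSystem;
+                 import org.apache.hadoop.fs.Path;
+
+                 Configuration conf = new Configuration();
+                 InMemoryFileSystem ramFs =
+                     new InMemoryFileSystem(URI.create("ramfs://mem"), conf);
+                 Path f = new Path("ramfs://mem/tmp/block-0");
+                 if (ramFs.reserveSpaceWithCheckSum(f, 4 * 1024 * 1024)) {
+                   // Memory for both the file and its checksum is now reserved.
+                   ramFs.create(f);
+                 }
+          -->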
+ <method name="getFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getNumFiles" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getFSSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPercentUsed" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+            <![CDATA[An implementation of the in-memory filesystem. This implementation assumes
+            that file lengths are known ahead of time and that the total length of all
+            the files is below a certain limit (like 100 MB, configurable). Use the API
+            reserveSpaceWithCheckSum(Path f, int size) (see the method description)
+            to reserve space in the FS. The URI of this filesystem starts with
+            ramfs:// .]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.InMemoryFileSystem -->
+ <!-- start class org.apache.hadoop.fs.LocalDirAllocator -->
+ <class name="LocalDirAllocator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalDirAllocator" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+            <![CDATA[Create an allocator object.
+            @param contextCfgItemName the configuration item that names the local
+            dirs to allocate from (e.g. "mapred.local.dir")]]>
+ </doc>
+ </constructor>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. This method should be used if the size of
+            the file is not known a priori. We go round-robin over the set of disks
+ (via the configured dirs) and return the first complete path where
+ we could create the parent directory of the passed path.
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Get a path from the local FS. Pass size as -1 if not known a priori. We
+ round-robin over the set of disks (via the configured dirs) and return
+ the first complete path which has enough space
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathToRead" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS for reading. We search through all the
+ configured dirs for the file's existence and return the complete
+            path to the file when we find one.
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createTmpFileForWrite" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a temporary file in the local FS. Pass size as -1 if not known
+            a priori. We round-robin over the set of disks (via the configured dirs)
+ and select the first complete path which has enough space. A file is
+ created on this directory. The file is guaranteed to go away when the
+ JVM exits.
+ @param pathStr prefix for the temporary file
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return a unique temporary file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isContextValid" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextCfgItemName" type="java.lang.String"/>
+ <doc>
+            <![CDATA[Method to check whether a context is valid.
+            @param contextCfgItemName the configuration item naming the context
+            @return true if the context is valid, false otherwise]]>
+ </doc>
+ </method>
+ <method name="ifExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+            <![CDATA[We search through all the configured dirs for the file's existence
+            and return true when we find it.
+            @param pathStr the requested file (this will be searched)
+            @param conf the Configuration object
+            @return true if the file exists, false otherwise
+            @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+            <![CDATA[An implementation of a round-robin scheme for disk allocation for creating
+            files. It keeps track of which disk was last
+            allocated for a file write. For the current request, the next disk from
+            the set of disks is allocated if the free space on the disk is
+            sufficient to accommodate the file that is being considered for
+            creation. If the space requirements cannot be met, the next disk in order
+            is tried, and so on until a disk with sufficient capacity is found.
+            Once a disk with sufficient space is identified, a check is done to make
+            sure that the disk is writable. Also, there is an API provided that doesn't
+            take the space requirements into consideration but just checks whether the
+            disk under consideration is writable (this should be used for cases where
+            the file size is not known a priori). An API is provided to read a path that
+            was created earlier. That API works by scanning all the disks for the
+            input pathname.
+ This implementation also provides the functionality of having multiple
+ allocators per JVM (one for each unique functionality or context, like
+ mapred, dfs-client, etc.). It ensures that there is only one instance of
+ an allocator per context per JVM.
+ Note:
+            1. The contexts referred to above are actually the configuration items defined
+ in the Configuration class like "mapred.local.dir" (for which we want to
+ control the dir allocations). The context-strings are exactly those
+ configuration items.
+ 2. This implementation does not take into consideration cases where
+ a disk becomes read-only or goes out of space while a file is being written
+ to (disks are shared between multiple processes, and so the latter situation
+ is probable).
+ 3. In the class implementation, "Disk" is referred to as "Dir", which
+ actually points to the configured directory on the Disk which will be the
+ parent for all file write/read allocations.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalDirAllocator -->
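+          <!-- A hedged sketch, not part of the API dump: one allocator per context
+               string; "mapred.local.dir" matches note 1 above, and the relative
+               path and size are illustrative.
+
+                 import org.apache.hadoop.conf.Configuration;
+                 import org.apache.hadoop.fs.LocalDirAllocator;
+                 import org.apache.hadoop.fs.Path;
+
+                 Configuration conf = new Configuration();
+                 LocalDirAllocator alloc = new LocalDirAllocator("mapred.local.dir");
+                 Path scratch = alloc.getLocalPathForWrite(
+                     "task_0/spill-0.out", 64L * 1024 * 1024, conf);
+          -->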
+ <!-- start class org.apache.hadoop.fs.LocalFileSystem -->
+ <class name="LocalFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocalFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Moves files to a bad file directory on the same device, so that their
+ storage will not be reused.]]>
+ </doc>
+ </method>
+ <doc>
+            <![CDATA[Implement the FileSystem API for the checksummed local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalFileSystem -->
+ <!-- start class org.apache.hadoop.fs.Path -->
+ <class name="Path" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="Path" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a path from a String. Path strings are URIs, but with
+ unescaped elements and some additional normalization.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Path from components.]]>
+ </doc>
+ </constructor>
+ <method name="toUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this to a URI.]]>
+ </doc>
+ </method>
+ <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the FileSystem that owns this Path.]]>
+ </doc>
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True if the directory of this path is absolute.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the final component of this path.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the parent of a path or null if at root.]]>
+ </doc>
+ </method>
+ <method name="suffix" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a suffix to the final name in the path.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="depth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of elements in this path.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <doc>
+ <![CDATA[Returns a qualified path object.]]>
+ </doc>
+ </method>
+ <field name="SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The directory separator, a slash.]]>
+ </doc>
+ </field>
+ <field name="SEPARATOR_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CUR_DIR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Names a file or directory in a {@link FileSystem}.
+ Path strings use slash as the directory separator. A path string is
+ absolute if it begins with a slash.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Path -->
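+          <!-- A hedged sketch, not part of the API dump, of the constructors and
+               makeQualified above; the URIs are illustrative.
+
+                 import org.apache.hadoop.conf.Configuration;
+                 import org.apache.hadoop.fs.Path;
+
+                 Configuration conf = new Configuration();
+                 Path parent = new Path("hdfs://namenode:8020/user/alice");
+                 Path child = new Path(parent, "logs");      // resolve against parent
+                 Path qualified = child.makeQualified(child.getFileSystem(conf));
+          -->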
+ <!-- start interface org.apache.hadoop.fs.PathFilter -->
+ <interface name="PathFilter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Tests whether or not the specified abstract pathname should be
+ included in a pathname list.
+
+ @param path The abstract pathname to be tested
+ @return <code>true</code> if and only if <code>pathname</code>
+ should be included]]>
+ </doc>
+ </method>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PathFilter -->
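+          <!-- A hedged sketch, not part of the API dump: an anonymous PathFilter
+               selecting part-* files (the pattern and directory are illustrative).
+
+                 import org.apache.hadoop.conf.Configuration;
+                 import org.apache.hadoop.fs.FileStatus;
+                 import org.apache.hadoop.fs.FileSystem;
+                 import org.apache.hadoop.fs.Path;
+                 import org.apache.hadoop.fs.PathFilter;
+
+                 FileSystem fs = FileSystem.get(new Configuration());
+                 PathFilter partsOnly = new PathFilter() {
+                   public boolean accept(Path path) {
+                     return path.getName().startsWith("part-");
+                   }
+                 };
+                 FileStatus[] parts = fs.listStatus(new Path("/output"), partsOnly);
+          -->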
+ <!-- start interface org.apache.hadoop.fs.PositionedReadable -->
+ <interface name="PositionedReadable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Read up to the specified number of bytes, from a given
+ position within a file, and return the number of bytes read. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the specified number of bytes, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Read a number of bytes equal to the length of the buffer, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits positional reading.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PositionedReadable -->
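+          <!-- A hedged sketch, not part of the API dump: FSDataInputStream
+               implements PositionedReadable, so a positional read leaves the
+               stream's own offset untouched; path and offsets are illustrative.
+
+                 // Assuming an already-obtained FileSystem instance `fs`.
+                 FSDataInputStream in = fs.open(new Path("/data/file.bin"));
+                 byte[] buf = new byte[512];
+                 in.readFully(1024L, buf, 0, buf.length);  // read 512 bytes at offset 1024
+                 long pos = in.getPos();                   // unchanged by the positional read
+          -->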
+ <!-- start class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <class name="RawLocalFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RawLocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the specified directory hierarchy. Does not
+ treat existence as an error.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsWorkingFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chown to set owner.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chmod to set permission.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the raw local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <!-- start interface org.apache.hadoop.fs.Seekable -->
+ <interface name="Seekable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Return the current offset from the start of the file.]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Seeks a different copy of the data. Returns true if
+            a new source is found, false otherwise.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits seeking.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Seekable -->
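+          <!-- A hedged sketch, not part of the API dump: seeking repositions the
+               stream so the next read comes from the new offset. Offsets are
+               illustrative; `in` is a seekable stream such as FSDataInputStream.
+
+                 in.seek(4096L);                             // next read() starts here
+                 int b = in.read();
+                 boolean moved = in.seekToNewSource(4096L);  // try another copy of the data
+          -->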
+ <!-- start class org.apache.hadoop.fs.ShellCommand -->
+ <class name="ShellCommand" extends="org.apache.hadoop.util.Shell"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link Shell} instead.">
+ <constructor name="ShellCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A base class for running a unix command like du or df.
+ @deprecated Use {@link Shell} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ShellCommand -->
+ <!-- start interface org.apache.hadoop.fs.Syncable -->
+ <interface name="Syncable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+            <![CDATA[Synchronize all buffers with the underlying devices.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+            <![CDATA[This interface declares the sync() operation.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Syncable -->
+ <!-- start class org.apache.hadoop.fs.Trash -->
+ <class name="Trash" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Trash" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a trash can accessor.
+ @param conf a Configuration]]>
+ </doc>
+ </constructor>
+ <method name="moveToTrash" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move a file or directory to the current trash directory.
+ @return false if the item is already in the trash or trash is disabled]]>
+ </doc>
+ </method>
+ <method name="checkpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a trash checkpoint.]]>
+ </doc>
+ </method>
+ <method name="expunge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete old checkpoints.]]>
+ </doc>
+ </method>
+ <method name="getEmptier" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a {@link Runnable} that periodically empties the trash of all
+ users, intended to be run by the superuser. Only one checkpoint is kept
+ at a time.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Run an emptier.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides a <i>trash</i> feature. Files are moved to a user's trash
+ directory, a subdirectory of their home directory named ".Trash". Files are
+ initially moved to a <i>current</i> sub-directory of the trash directory.
+ Within that sub-directory their original path is preserved. Periodically
+ one may checkpoint the current trash and remove older checkpoints. (This
+ design permits trash management without enumeration of the full trash
+ content, without date support in the filesystem, and without clock
+ synchronization.)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Trash -->
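+          <!-- A hedged sketch, not part of the API dump: moving a path to trash
+               instead of deleting it outright; the path is illustrative.
+
+                 import org.apache.hadoop.conf.Configuration;
+                 import org.apache.hadoop.fs.Path;
+                 import org.apache.hadoop.fs.Trash;
+
+                 Trash trash = new Trash(new Configuration());
+                 boolean moved = trash.moveToTrash(new Path("/user/alice/old-logs"));
+                 trash.checkpoint();  // snapshot the current trash
+                 trash.expunge();     // delete old checkpoints
+          -->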
+</package>
+<package name="org.apache.hadoop.fs.ftp">
+ <!-- start class org.apache.hadoop.fs.ftp.FTPException -->
+ <class name="FTPException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.String, java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A class to wrap a {@link Throwable} into a Runtime Exception.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPException -->
+ <!-- start class org.apache.hadoop.fs.ftp.FTPFileSystem -->
+ <class name="FTPFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A stream obtained via this call must be closed before using other APIs of
+ this class or else the invocation will block.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} backed by an FTP client provided by <a
+ href="http://commons.apache.org/net/">Apache Commons Net</a>.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPFileSystem -->
+ <!-- start class org.apache.hadoop.fs.ftp.FTPInputStream -->
+ <class name="FTPInputStream" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPInputStream" type="java.io.InputStream, org.apache.commons.net.ftp.FTPClient, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readLimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPInputStream -->
+</package>
+<package name="org.apache.hadoop.fs.kfs">
+ <!-- start class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+ <class name="KosmosFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="KosmosFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return null if the file doesn't exist; otherwise, get the
+      locations of the various chunks of the file from KFS.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A FileSystem backed by KFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+</package>
+<package name="org.apache.hadoop.fs.permission">
+ <!-- start class org.apache.hadoop.fs.permission.AccessControlException -->
+ <class name="AccessControlException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AccessControlException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor is needed for unwrapping from
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[An exception class for access control related issues.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.AccessControlException -->
+ <!-- start class org.apache.hadoop.fs.permission.FsAction -->
+ <class name="FsAction" extends="java.lang.Enum&lt;org.apache.hadoop.fs.permission.FsAction&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.fs.permission.FsAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="implies" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+      <![CDATA[Return true if this action implies that action.
+ @param that the action that may be implied]]>
+ </doc>
+ </method>
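+      <!-- Sketch of implies() over the standard FsAction constants (NONE,
+           EXECUTE, WRITE, READ, READ_WRITE, READ_EXECUTE, ALL); an action
+           implies exactly the actions whose permission bits it contains.
+
+           FsAction.READ_WRITE.implies(FsAction.READ);    // true
+           FsAction.READ.implies(FsAction.READ_WRITE);    // false
+           FsAction.ALL.implies(FsAction.READ_EXECUTE);   // true
+           FsAction.READ.or(FsAction.WRITE);              // READ_WRITE
+      -->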
+ <method name="and" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[AND operation.]]>
+ </doc>
+ </method>
+ <method name="or" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[OR operation.]]>
+ </doc>
+ </method>
+ <method name="not" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[NOT operation.]]>
+ </doc>
+ </method>
+ <field name="INDEX" type="int"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Octal representation]]>
+ </doc>
+ </field>
+ <field name="SYMBOL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Symbolic representation]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[File system actions, e.g. read, write, etc.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsAction -->
+ <!-- start class org.apache.hadoop.fs.permission.FsPermission -->
+ <class name="FsPermission" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Construct from the given {@link FsAction} values.
+ @param u user action
+ @param g group action
+ @param o other action]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="short"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Construct from the given mode.
+ @param mode the permission encoded as a short
+ @see #toShort()]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor
+
+ @param other the permission to copy]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="permission" type="short"/>
+ <doc>
+ <![CDATA[Create an immutable {@link FsPermission} object.]]>
+ </doc>
+ </method>
+ <method name="getUserAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getGroupAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getOtherAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return other {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="fromShort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="short"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link FsPermission} from {@link DataInput}.]]>
+ </doc>
+ </method>
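+      <!-- Round-trip sketch for the Writable methods, using the in-memory
+           buffers from org.apache.hadoop.io; the 0644 mode is an arbitrary
+           example value.
+
+           FsPermission p = new FsPermission((short) 0644);
+           DataOutputBuffer out = new DataOutputBuffer();
+           p.write(out);                             // serialize
+           DataInputBuffer in = new DataInputBuffer();
+           in.reset(out.getData(), out.getLength());
+           FsPermission q = FsPermission.read(in);   // q.toShort() == 0644
+      -->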
+ <method name="toShort" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Encode the object to a short.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+      <![CDATA[Apply a umask to this permission and return a new one.]]>
+ </doc>
+ </method>
+ <method name="getUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="setUMask"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Set the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="getDefault" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default permission.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unixSymbolicPermission" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Create an FsPermission from a Unix symbolic permission string.
+ @param unixSymbolicPermission e.g. "-rw-rw-rw-"]]>
+ </doc>
+ </method>
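+      <!-- Sketch combining valueOf() with applyUMask(); the mode values are
+           illustrative.
+
+           FsPermission perm  = FsPermission.valueOf("-rw-rw-rw-");  // 0666
+           FsPermission umask = new FsPermission((short) 0022);
+           FsPermission effective = perm.applyUMask(umask);          // 0644
+      -->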
+ <field name="UMASK_LABEL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[umask property label]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_UMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A class for file/directory permissions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsPermission -->
+ <!-- start class org.apache.hadoop.fs.permission.PermissionStatus -->
+ <class name="PermissionStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="PermissionStatus" type="java.lang.String, java.lang.String, org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <param name="group" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Create an immutable {@link PermissionStatus} object.]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user name]]>
+ </doc>
+ </method>
+ <method name="getGroupName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group name]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return permission]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply umask.
+ @see FsPermission#applyUMask(FsPermission)]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link PermissionStatus} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a {@link PermissionStatus} from its base components.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store permission related information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.PermissionStatus -->
+</package>
+<package name="org.apache.hadoop.fs.s3">
+ <!-- start class org.apache.hadoop.fs.s3.Block -->
+ <class name="Block" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Block" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Holds metadata about a block of data being stored in a {@link FileSystemStore}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.Block -->
+ <!-- start interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <interface name="FileSystemStore" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inode" type="org.apache.hadoop.fs.s3.INode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="inodeExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveINode" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveBlock" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="byteRangeStart" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listDeepSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="purge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete everything. Used for testing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="dump"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Diagnostic method to dump all INodes to the console.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for storing and retrieving {@link INode}s and {@link Block}s.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <!-- start class org.apache.hadoop.fs.s3.INode -->
+ <class name="INode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="INode" type="org.apache.hadoop.fs.s3.INode.FileType, org.apache.hadoop.fs.s3.Block[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.fs.s3.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileType" return="org.apache.hadoop.fs.s3.INode.FileType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSerializedLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="serialize" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deserialize" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="FILE_TYPES" type="org.apache.hadoop.fs.s3.INode.FileType[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DIRECTORY_INODE" type="org.apache.hadoop.fs.s3.INode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[Holds file metadata including type (regular file or directory),
+ and the list of blocks that are pointers to the data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.INode -->
+ <!-- start class org.apache.hadoop.fs.s3.MigrationTool -->
+ <class name="MigrationTool" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="MigrationTool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ This class is a tool for migrating data from an older to a newer version
+ of an S3 filesystem.
+ </p>
+ <p>
+ All files in the filesystem are migrated by re-writing the block metadata;
+ no datafiles are touched.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.MigrationTool -->
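+  <!-- A plausible programmatic invocation via org.apache.hadoop.util.ToolRunner;
+       the bucket URI is hypothetical, and the tool can equally be launched from
+       the command line through the stock "hadoop CLASSNAME" runner.
+
+       String[] args = { "s3://example-bucket" };
+       int rc = ToolRunner.run(new MigrationTool(), args);  // throws Exception
+  -->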
+ <!-- start class org.apache.hadoop.fs.s3.S3Credentials -->
+ <class name="S3Credentials" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Credentials"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@throws IllegalArgumentException if credentials for S3 cannot be
+ determined.]]>
+ </doc>
+ </method>
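+      <!-- Sketch of supplying credentials through the configuration rather
+           than the URI; the fs.s3.* property names are the standard
+           credential keys, and the values are placeholders.
+
+           Configuration conf = new Configuration();
+           conf.set("fs.s3.awsAccessKeyId", "AKIAEXAMPLE");
+           conf.set("fs.s3.awsSecretAccessKey", "not.a.real.secret");
+           S3Credentials creds = new S3Credentials();
+           creds.initialize(URI.create("s3://example-bucket"), conf);
+           String accessKey = creds.getAccessKey();
+      -->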
+ <method name="getAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSecretAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Extracts AWS credentials from the filesystem URI or configuration.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Credentials -->
+ <!-- start class org.apache.hadoop.fs.s3.S3Exception -->
+ <class name="S3Exception" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Exception" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown if there is a problem communicating with Amazon S3.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Exception -->
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystem -->
+ <class name="S3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="S3FileSystem" type="org.apache.hadoop.fs.s3.FileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[FileStatus for S3 file systems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A block-based {@link FileSystem} backed by
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ </p>
+ @see NativeS3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystem -->
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <class name="S3FileSystemException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystemException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when there is a fatal exception while using {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <!-- start class org.apache.hadoop.fs.s3.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="org.apache.hadoop.fs.s3.S3FileSystemException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when Hadoop cannot read the version of the data stored
+ in {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.VersionMismatchException -->
+</package>
+<package name="org.apache.hadoop.fs.s3native">
+ <!-- start class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
+ <class name="NativeS3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeS3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NativeS3FileSystem" type="org.apache.hadoop.fs.s3native.NativeFileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ If <code>f</code> is a file, this method will make a single call to S3.
+ If <code>f</code> is a directory, this method will make a maximum of
+ (<i>n</i> / 1000) + 2 calls to S3, where <i>n</i> is the total number of
+ files and directories contained directly in <code>f</code>.
+ </p>]]>
+ </doc>
+ </method>
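+      <!-- Worked example of the bound above: a directory holding 2,000 files
+           and directories needs at most (2000 / 1000) + 2 = 4 calls to S3,
+           since keys are listed in pages of 1,000. -->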
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} for reading and writing files stored on
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
+ stores files on S3 in their
+ native form so they can be read by other S3 tools.
+ </p>
+ @see org.apache.hadoop.fs.s3.S3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
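+  <!-- Minimal read sketch, assuming the s3n: scheme is mapped to this class in
+       the configuration; the bucket, object path and buffer size are
+       hypothetical.
+
+       Configuration conf = new Configuration();
+       FileSystem fs = FileSystem.get(URI.create("s3n://example-bucket"), conf);
+       FSDataInputStream in = fs.open(new Path("/logs/part-00000"), 4096);
+       try {
+         // read as from any FileSystem stream
+       } finally {
+         in.close();
+       }
+  -->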
+</package>
+<package name="org.apache.hadoop.fs.shell">
+ <!-- start class org.apache.hadoop.fs.shell.Command -->
+ <class name="Command" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Command" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the command's name, excluding the leading "-" character.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the command on the input path
+
+ @param path the input path
+ @throws IOException if any error occurs]]>
+ </doc>
+ </method>
+ <method name="runAll" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[For each source path, execute the command
+
+ @return 0 if it runs successfully; -1 if it fails]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="args" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract class for the execution of a file system command]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Command -->
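+ <!-- Subclassing sketch for Command: a hypothetical "echo" command built on
+      the protected constructor, fields, and abstract methods listed above;
+      EchoCommand and its behaviour are illustrative only.
+
+      import java.io.IOException;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.fs.shell.Command;
+
+      public class EchoCommand extends Command {
+        public EchoCommand(FileSystem fs, String[] paths) {
+          super(fs);           // the protected constructor shown above
+          this.args = paths;   // source paths consumed by runAll()
+        }
+        public String getCommandName() {
+          return "echo";       // name without the leading "-"
+        }
+        protected void run(Path path) throws IOException {
+          System.out.println(path + " exists: " + fs.exists(path));
+        }
+      }
+ -->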
+ <!-- start class org.apache.hadoop.fs.shell.CommandFormat -->
+ <class name="CommandFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CommandFormat" type="java.lang.String, int, int, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="parse" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="pos" type="int"/>
+ <doc>
+ <![CDATA[Parse parameters starting from the given position
+
+ @param args an array of input arguments
+ @param pos the position at which to start parsing
+ @return a list of parameters]]>
+ </doc>
+ </method>
+ <method name="getOpt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="option" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return whether the option is set.
+
+ @param option String representation of an option
+ @return true if the option is set; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Parse the args of a command and check the format of args.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.CommandFormat -->
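+ <!-- Usage sketch for CommandFormat, assuming the four-argument constructor
+      above takes (name, minimum, maximum, options); the option letter and
+      argument values are hypothetical.
+
+      import java.util.List;
+      import org.apache.hadoop.fs.shell.CommandFormat;
+
+      public class ParseArgs {
+        public static void main(String[] args) {
+          // Accept 1 or 2 positional parameters and a "q" option,
+          // e.g. args = { "-count", "-q", "/user/alice" }.
+          CommandFormat fmt =
+              new CommandFormat("count", 1, 2, new String[] { "q" });
+          List<String> params = fmt.parse(args, 1); // skip the command name
+          boolean quota = fmt.getOpt("q");
+          System.out.println("quota=" + quota + " params=" + params);
+        }
+      }
+ -->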
+ <!-- start class org.apache.hadoop.fs.shell.Count -->
+ <class name="Count" extends="org.apache.hadoop.fs.shell.Command"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Count" type="java.lang.String[], int, org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param cmd the count command
+ @param pos the starting index of the arguments
+ @param fs the file system handler]]>
+ </doc>
+ </constructor>
+ <method name="matches" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Check if a command is the count command
+
+ @param cmd A string representation of a command starting with "-"
+ @return true if this is a count command; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USAGE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DESCRIPTION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Count the number of directories, files, bytes, quota, and remaining quota.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Count -->
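+ <!-- Dispatch sketch for Count: a minimal example, with a hypothetical
+      argument vector, of checking and running the count command through
+      matches() and the inherited runAll().
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.shell.Count;
+
+      public class CountDemo {
+        public static void main(String[] args) throws Exception {
+          // e.g. args = { "-count", "/user/alice" }
+          FileSystem fs = FileSystem.get(new Configuration());
+          if (Count.matches(args[0])) {
+            // the command's own arguments start right after its name
+            System.exit(new Count(args, 1, fs).runAll());
+          }
+        }
+      }
+ -->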
+</package>
+<package name="org.apache.hadoop.io">
+ <!-- start class org.apache.hadoop.io.AbstractMapWritable -->
+ <class name="AbstractMapWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="AbstractMapWritable"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructor.]]>
+ </doc>
+ </constructor>
+ <method name="addToMap"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a Class to the maps if it is not already present.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="byte"/>
+ <doc>
+ <![CDATA[@return the Class class for the specified id]]>
+ </doc>
+ </method>
+ <method name="getId" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[@return the id for the specified Class]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Used by child copy constructors.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the conf]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@param conf the conf to set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract base class for MapWritable and SortedMapWritable
+
+ Unlike org.apache.nutch.crawl.MapWritable, this class allows creation of
+ MapWritable&lt;Writable, MapWritable&gt; so the CLASS_TO_ID and ID_TO_CLASS
+ maps travel with the class instead of being static.
+
+ Class ids range from 1 to 127 so there can be at most 127 distinct classes
+ in any specific map instance.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.AbstractMapWritable -->
+ <!-- start class org.apache.hadoop.io.ArrayFile -->
+ <class name="ArrayFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A dense file-based mapping from integers to values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Reader -->
+ <class name="ArrayFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an array reader for the named file.]]>
+ </doc>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader before its <code>n</code>th value.]]>
+ </doc>
+ </method>
+ <method name="next" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read and return the next value in the file.]]>
+ </doc>
+ </method>
+ <method name="key" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the key associated with the most recent call to {@link
+ #seek(long)}, {@link #next(Writable)}, or {@link
+ #get(long,Writable)}.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the <code>n</code>th value in the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Reader -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Writer -->
+ <class name="ArrayFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a value to the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Writer -->
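+ <!-- Round-trip sketch for ArrayFile: writes five IntWritable values with
+      ArrayFile.Writer and reads one back by index with ArrayFile.Reader;
+      the file name "numbers" is hypothetical, and close() is assumed to be
+      inherited from MapFile.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.io.ArrayFile;
+      import org.apache.hadoop.io.IntWritable;
+
+      public class ArrayFileDemo {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.get(conf);
+          ArrayFile.Writer writer =
+              new ArrayFile.Writer(conf, fs, "numbers", IntWritable.class);
+          for (int i = 0; i < 5; i++) {
+            writer.append(new IntWritable(i * i)); // the key is the index
+          }
+          writer.close();
+
+          ArrayFile.Reader reader = new ArrayFile.Reader(fs, "numbers", conf);
+          IntWritable value = new IntWritable();
+          reader.get(3, value);        // random access by index
+          System.out.println(value);   // prints 9
+          reader.close();
+        }
+      }
+ -->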
+ <!-- start class org.apache.hadoop.io.ArrayWritable -->
+ <class name="ArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for arrays containing instances of a class. The elements of this
+ writable must all be instances of the same class. If this writable will be
+ the input for a Reducer, you will need to create a subclass that sets the
+ value to be of the proper type.
+
+ For example:
+ <blockquote><pre>
+ public class IntArrayWritable extends ArrayWritable {
+   public IntArrayWritable() {
+     super(IntWritable.class);
+   }
+ }
+ </pre></blockquote>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayWritable -->
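+ <!-- Usage sketch for ArrayWritable: populating and reading an array of a
+      single Writable element type, per the constructors declared above.
+
+      import org.apache.hadoop.io.ArrayWritable;
+      import org.apache.hadoop.io.IntWritable;
+      import org.apache.hadoop.io.Writable;
+
+      public class ArrayWritableDemo {
+        public static void main(String[] args) {
+          ArrayWritable aw = new ArrayWritable(IntWritable.class);
+          aw.set(new Writable[] { new IntWritable(1), new IntWritable(2) });
+          for (Writable w : aw.get()) {
+            System.out.println(w);
+          }
+          // The String[] constructor wraps plain strings directly.
+          ArrayWritable labels = new ArrayWritable(new String[] { "a", "b" });
+          System.out.println(labels.toStrings().length); // 2
+        }
+      }
+ -->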
+ <!-- start class org.apache.hadoop.io.BooleanWritable -->
+ <class name="BooleanWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BooleanWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BooleanWritable" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="get" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for booleans.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable -->
+ <!-- start class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <class name="BooleanWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BooleanWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BooleanWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.BytesWritable -->
+ <class name="BytesWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-size sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="BytesWritable" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a BytesWritable using the byte array as the initial value.
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the BytesWritable.
+ @return The data; only valid between 0 and getSize() - 1.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current size of the buffer.]]>
+ </doc>
+ </method>
+ <method name="setSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Change the size of the buffer. The values in the old range are preserved
+ and any new values are undefined. The capacity is changed if it is
+ necessary.
+ @param size The new number of bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum size that could be handled without
+ resizing the backing storage.
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_cap" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved.
+ @param new_cap The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="org.apache.hadoop.io.BytesWritable"/>
+ <doc>
+ <![CDATA[Set the BytesWritable to the contents of the given newData.
+ @param newData the value to set this BytesWritable to.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Set the value to a copy of the given byte range
+ @param newData the new values to copy in
+ @param offset the offset in newData to start at
+ @param length the number of bytes to copy]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Define the sort order of the BytesWritable.
+ @param right_obj The other bytes writable
+ @return Positive if left is bigger than right, 0 if they are equal, and
+ negative if left is smaller than right.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Are the two byte sequences equal?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generate the stream of bytes as hex pairs separated by ' '.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is usable as a key or value.
+ It is resizable and distinguishes between the size of the sequence and
+ the current capacity. The hash function is the front of the MD5 of the
+ buffer. The sort order is the same as memcmp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable -->
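+ <!-- Usage sketch for BytesWritable: copying a byte range in with
+      set(byte[], int, int) and observing size, capacity, and the
+      hex-pair toString() described above.
+
+      import org.apache.hadoop.io.BytesWritable;
+
+      public class BytesDemo {
+        public static void main(String[] args) {
+          byte[] raw = { 1, 2, 3, 4, 5 };
+          BytesWritable bw = new BytesWritable();
+          bw.set(raw, 1, 3);                 // copies bytes 2, 3 and 4
+          System.out.println(bw.getSize());  // 3 valid bytes
+          System.out.println(bw.getCapacity() >= bw.getSize()); // true
+          System.out.println(bw);            // hex pairs, e.g. "02 03 04"
+        }
+      }
+ -->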
+ <!-- start class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <class name="BytesWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BytesWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BytesWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable.Comparator -->
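+ <!-- Raw-comparison sketch for BytesWritable.Comparator: serializes two
+      values with the DataOutputBuffer declared later in this package and
+      compares them in serialized form, without deserializing.
+
+      import org.apache.hadoop.io.BytesWritable;
+      import org.apache.hadoop.io.DataOutputBuffer;
+
+      public class RawCompareDemo {
+        static byte[] serialize(BytesWritable w) throws Exception {
+          DataOutputBuffer out = new DataOutputBuffer();
+          w.write(out);
+          byte[] data = new byte[out.getLength()];
+          System.arraycopy(out.getData(), 0, data, 0, out.getLength());
+          return data;
+        }
+        public static void main(String[] args) throws Exception {
+          byte[] a = serialize(new BytesWritable(new byte[] { 1, 2 }));
+          byte[] b = serialize(new BytesWritable(new byte[] { 1, 3 }));
+          int c = new BytesWritable.Comparator()
+              .compare(a, 0, a.length, b, 0, b.length);
+          System.out.println(c < 0); // true: memcmp order on the raw bytes
+        }
+      }
+ -->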
+ <!-- start class org.apache.hadoop.io.ByteWritable -->
+ <class name="ByteWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="ByteWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ByteWritable" type="byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Set the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a ByteWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two ByteWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for a single byte.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable -->
+ <!-- start class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <class name="ByteWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ByteWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for ByteWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <!-- start interface org.apache.hadoop.io.Closeable -->
+ <interface name="Closeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="use java.io.Closeable">
+ <implements name="java.io.Closeable"/>
+ <doc>
+ <![CDATA[@deprecated use java.io.Closeable]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Closeable -->
+ <!-- start class org.apache.hadoop.io.CompressedWritable -->
+ <class name="CompressedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="CompressedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ensureInflated"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Must be called by all methods which access fields to ensure that the data
+ has been uncompressed.]]>
+ </doc>
+ </method>
+ <method name="readFieldsCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #readFields(DataInput)}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #write(DataOutput)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base-class for Writables which store themselves compressed and lazily
+ inflate on field access. This is useful for large objects whose fields are
+ not altered during a map or reduce operation: leaving the field data
+ compressed makes copying the instance from one file to another much
+ faster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.CompressedWritable -->
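+ <!-- Subclassing sketch for CompressedWritable: a hypothetical record that
+      implements the two abstract hooks above and guards field access with
+      ensureInflated(); the field layout is illustrative only.
+
+      import java.io.DataInput;
+      import java.io.DataOutput;
+      import java.io.IOException;
+      import org.apache.hadoop.io.CompressedWritable;
+
+      public class BigRecord extends CompressedWritable {
+        private String payload = "";
+
+        protected void writeCompressed(DataOutput out) throws IOException {
+          out.writeUTF(payload);   // called instead of write(DataOutput)
+        }
+        protected void readFieldsCompressed(DataInput in) throws IOException {
+          payload = in.readUTF();  // called instead of readFields(DataInput)
+        }
+        public String getPayload() {
+          ensureInflated();        // inflate before the first field access
+          return payload;
+        }
+      }
+ -->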
+ <!-- start class org.apache.hadoop.io.DataInputBuffer -->
+ <class name="DataInputBuffer" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataInputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataInput} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataInputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataInputBuffer buffer = new DataInputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using DataInput methods ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataInputBuffer -->
+ <!-- start class org.apache.hadoop.io.DataOutputBuffer -->
+ <class name="DataOutputBuffer" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataOutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <constructor name="DataOutputBuffer" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from a DataInput directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataOutput} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataOutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataOutputBuffer buffer = new DataOutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using DataOutput methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataOutputBuffer -->
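+ <!-- Round-trip sketch combining DataOutputBuffer and DataInputBuffer: a
+      Writable is serialized into one buffer and read back through the
+      other, following the typical-usage patterns quoted above.
+
+      import org.apache.hadoop.io.DataInputBuffer;
+      import org.apache.hadoop.io.DataOutputBuffer;
+      import org.apache.hadoop.io.IntWritable;
+
+      public class RoundTrip {
+        public static void main(String[] args) throws Exception {
+          DataOutputBuffer out = new DataOutputBuffer();
+          new IntWritable(42).write(out);   // serialize into the buffer
+          DataInputBuffer in = new DataInputBuffer();
+          in.reset(out.getData(), out.getLength());
+          IntWritable copy = new IntWritable();
+          copy.readFields(in);              // deserialize the same bytes
+          System.out.println(copy.get());   // 42
+        }
+      }
+ -->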
+ <!-- start class org.apache.hadoop.io.DefaultStringifier -->
+ <class name="DefaultStringifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Stringifier&lt;T&gt;"/>
+ <constructor name="DefaultStringifier" type="org.apache.hadoop.conf.Configuration, java.lang.Class&lt;T&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="store"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="item" type="K"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the item in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration in which to store the item
+ @param item the object to be stored
+ @param keyName the name of the key to use
+ @throws IOException forwards exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="load" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException forwards exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="storeArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="items" type="K[]"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the array of items in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param items the objects to be stored
+ @param keyName the name of the key to use
+ @throws IndexOutOfBoundsException if the items array is empty
+ @throws IOException forwards exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="loadArray" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the array of objects from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException forwards exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[DefaultStringifier is the default implementation of the {@link Stringifier}
+ interface which stringifies the objects using base64 encoding of the
+ serialized version of the objects. The {@link Serializer} and
+ {@link Deserializer} are obtained from the {@link SerializationFactory}.
+ <br>
+ DefaultStringifier offers convenience methods to store/load objects to/from
+ the configuration.
+
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DefaultStringifier -->
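+ <!-- Usage sketch for DefaultStringifier: stashing a Writable in a
+      Configuration and restoring it, via the static store/load methods
+      above; the key name "my.key" is hypothetical.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.DefaultStringifier;
+      import org.apache.hadoop.io.IntWritable;
+
+      public class ConfStash {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          // base64-encodes the serialized IntWritable under the given key
+          DefaultStringifier.store(conf, new IntWritable(7), "my.key");
+          IntWritable back =
+              DefaultStringifier.load(conf, "my.key", IntWritable.class);
+          System.out.println(back.get()); // 7
+        }
+      }
+ -->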
+ <!-- start class org.apache.hadoop.io.DoubleWritable -->
+ <class name="DoubleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="DoubleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DoubleWritable" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="double"/>
+ </method>
+ <method name="get" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a DoubleWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Writable for Double values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable -->
+ <!-- start class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <class name="DoubleWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DoubleWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for DoubleWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.FloatWritable -->
+ <class name="FloatWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="FloatWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FloatWritable" type="float"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="float"/>
+ <doc>
+ <![CDATA[Set the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a FloatWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two FloatWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for floats.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable -->
+ <!-- start class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <class name="FloatWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FloatWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for FloatWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.GenericWritable -->
+ <class name="GenericWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="GenericWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Set the instance that is wrapped.
+
+ @param obj the Writable instance to wrap]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the wrapped instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTypes" return="java.lang.Class[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return all classes that may be wrapped. Subclasses should implement this
+ to return a constant array of classes.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper for Writable instances.
+ <p>
+ When two sequence files, which have the same Key type but different Value
+ types, are mapped out to reduce, multiple Value types are not allowed.
+ In this case, this class can help you wrap instances with different types.
+ </p>
+
+ <p>
+ Compared with <code>ObjectWritable</code>, this class is much more efficient,
+ because <code>ObjectWritable</code> appends the class declaration as a String
+ to the output file in every Key-Value pair.
+ </p>
+
+ <p>
+ Generic Writable implements {@link Configurable} interface, so that it will be
+ configured by the framework. The configuration is passed to the wrapped objects
+ implementing {@link Configurable} interface <i>before deserialization</i>.
+ </p>
+
+ How to use it: <br>
+ 1. Write your own class, such as GenericObject, which extends GenericWritable.<br>
+ 2. Implement the abstract method <code>getTypes()</code> to define
+ the classes that will be wrapped in GenericObject in the application.
+ Note: the classes defined in <code>getTypes()</code> must
+ implement the <code>Writable</code> interface.
+ <br><br>
+
+ The code looks like this:
+ <blockquote><pre>
+ public class GenericObject extends GenericWritable {
+
+ private static Class[] CLASSES = {
+ ClassType1.class,
+ ClassType2.class,
+ ClassType3.class,
+ };
+
+ protected Class[] getTypes() {
+ return CLASSES;
+ }
+
+ }
+ </pre></blockquote>
+
+ @since Nov 8, 2006]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.GenericWritable -->
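+ <!-- Round-trip sketch for GenericWritable: a concrete subclass wrapping
+      one of two declared types, then serialized and restored through the
+      in-memory buffers from this package; GenericDemo is hypothetical.
+
+      import org.apache.hadoop.io.DataInputBuffer;
+      import org.apache.hadoop.io.DataOutputBuffer;
+      import org.apache.hadoop.io.GenericWritable;
+      import org.apache.hadoop.io.IntWritable;
+      import org.apache.hadoop.io.Text;
+
+      public class GenericDemo extends GenericWritable {
+        private static final Class[] CLASSES =
+            { IntWritable.class, Text.class };
+        protected Class[] getTypes() { return CLASSES; }
+
+        public static void main(String[] args) throws Exception {
+          GenericDemo wrapper = new GenericDemo();
+          wrapper.set(new IntWritable(7)); // wrap one of the declared types
+          DataOutputBuffer out = new DataOutputBuffer();
+          wrapper.write(out);              // writes a type id + the value
+          GenericDemo copy = new GenericDemo();
+          DataInputBuffer in = new DataInputBuffer();
+          in.reset(out.getData(), out.getLength());
+          copy.readFields(in);
+          System.out.println(((IntWritable) copy.get()).get()); // 7
+        }
+      }
+ -->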
+ <!-- start class org.apache.hadoop.io.InputBuffer -->
+ <class name="InputBuffer" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link InputStream} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new InputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ InputBuffer buffer = new InputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using InputStream methods ...
+ }
+ </pre>
+ @see DataInputBuffer
+ @see DataOutput]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.InputBuffer -->
+ <!-- start class org.apache.hadoop.io.IntWritable -->
+ <class name="IntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="IntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="IntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an IntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two IntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for ints.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable -->
+ <!-- start class org.apache.hadoop.io.IntWritable.Comparator -->
+ <class name="IntWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IntWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for IntWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.IOUtils -->
+ <class name="IOUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="buffSize" type="int"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param buffSize the size of the buffer
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another. <strong>Closes the input and output
+ streams at the end</strong>.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads len bytes in a loop.
+ @param in The InputStream to read from
+ @param buf The buffer to fill
+ @param off offset into the buffer
+ @param len the length of bytes to read
+ @throws IOException if it could not read requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Similar to readFully(). Skips bytes in a loop.
+ @param in The InputStream to skip bytes from
+ @param len number of bytes to skip.
+ @throws IOException if it could not skip requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="closeables" type="java.io.Closeable[]"/>
+ <doc>
+ <![CDATA[Close the Closeable objects and <b>ignore</b> any {@link IOException} or
+ null pointers. Must only be used for cleanup in exception handlers.
+ @param log the log to record problems to at debug level. Can be null.
+ @param closeables the objects to close]]>
+ </doc>
+ </method>
+ <method name="closeStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Closeable"/>
+ <doc>
+ <![CDATA[Closes the stream ignoring {@link IOException}.
+ Must only be used for cleanup in exception handlers.
+ @param stream the Stream to close]]>
+ </doc>
+ </method>
+ <method name="closeSocket"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <doc>
+ <![CDATA[Closes the socket, ignoring any {@link IOException}.
+ @param sock the Socket to close]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility class for I/O-related functionality.
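+
+ <p>For illustration, a minimal sketch of copying one stream to another
+ (the file names are hypothetical):<pre>
+
+   InputStream in = new FileInputStream("input.dat");
+   OutputStream out = new FileOutputStream("output.dat");
+   // copy with a 4 KB buffer; 'true' closes both streams when done
+   IOUtils.copyBytes(in, out, 4096, true);
+ </pre>]]>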
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils -->
+ <!-- start class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <class name="IOUtils.NullOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils.NullOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[/dev/null of OutputStreams.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <!-- start class org.apache.hadoop.io.LongWritable -->
+ <class name="LongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="LongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a LongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two LongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable -->
+ <!-- start class org.apache.hadoop.io.LongWritable.Comparator -->
+ <class name="LongWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <class name="LongWritable.DecreasingComparator" extends="org.apache.hadoop.io.LongWritable.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.DecreasingComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A decreasing Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <!-- start class org.apache.hadoop.io.MapFile -->
+ <class name="MapFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="oldName" type="java.lang.String"/>
+ <param name="newName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames an existing map directory.]]>
+ </doc>
+ </method>
+ <method name="delete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deletes the named map file.]]>
+ </doc>
+ </method>
+ <method name="fix" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valueClass" type="java.lang.Class"/>
+ <param name="dryrun" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This method attempts to fix a corrupt MapFile by re-creating its index.
+ @param fs filesystem
+ @param dir directory containing the MapFile data and index
+ @param keyClass key class (has to be a subclass of Writable)
+ @param valueClass value class (has to be a subclass of Writable)
+ @param dryrun do not perform any changes, just report what needs to be done
+ @return number of valid entries in this MapFile, or -1 if no fixing was needed
+ @throws Exception]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="INDEX_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the index file.]]>
+ </doc>
+ </field>
+ <field name="DATA_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the data file.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A file-based map from keys to values.
+
+ <p>A map is a directory containing two files, the <code>data</code> file,
+ containing all keys and values in the map, and a smaller <code>index</code>
+ file, containing a fraction of the keys. The fraction is determined by
+ {@link Writer#getIndexInterval()}.
+
+ <p>The index file is read entirely into memory. Thus key implementations
+ should try to keep themselves small.
+
+ <p>Map files are created by adding entries in order. To maintain a large
+ database, perform updates by copying the previous version of the database,
+ merging in a sorted change list, and writing the new version of the database
+ to a new file. Sorting large change lists can be done with {@link
+ SequenceFile.Sorter}.
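+
+ <p>For illustration, a minimal sketch of creating a map in key order
+ (the path is hypothetical and Text is the standard string Writable):<pre>
+
+   Configuration conf = new Configuration();
+   FileSystem fs = FileSystem.get(conf);
+   MapFile.Writer writer =
+     new MapFile.Writer(conf, fs, "my.map", IntWritable.class, Text.class);
+   for (int i = 0; i &lt; 100; i++) {
+     // keys must be appended in sorted order
+     writer.append(new IntWritable(i), new Text("value-" + i));
+   }
+   writer.close();
+ </pre>]]>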
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile -->
+ <!-- start class org.apache.hadoop.io.MapFile.Reader -->
+ <class name="MapFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map using the named comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Hook to allow subclasses to defer opening streams until further
+ initialization is complete.
+ @see #createDataFileReader(FileSystem, Path, Configuration)]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="open"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dirName" type="java.lang.String"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDataFileReader" return="org.apache.hadoop.io.SequenceFile.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dataFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link SequenceFile.Reader} returned.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Re-positions the reader before its first key.]]>
+ </doc>
+ </method>
+ <method name="midKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the key at approximately the middle of the file.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalKey"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the final key from the file.
+
+ @param key key to read into]]>
+ </doc>
+ </method>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader at the named key, or, if no such key exists, at the
+ first entry after the named key. Returns true iff the named key exists
+ in this map.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the map into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ the end of the map.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the value for the named key, or null if none exists.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+ Returns <code>key</code> or, if it does not exist, the first entry that
+ falls just after the named key.
+
+ @param key - key that we're trying to find
+ @param val - data value if key is found
+ @return - the key that was the closest match or null if eof.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <param name="before" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+
+ @param key - key that we're trying to find
+ @param val - data value if key is found
+ @param before - if true, and <code>key</code> does not exist, return
+ the first entry that falls just before the <code>key</code>. Otherwise,
+ return the record that sorts just after.
+ @return - the key that was the closest match or null if eof.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides access to an existing map.
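+
+ <p>For illustration, a minimal lookup sketch, assuming IntWritable keys
+ and Text values (the path is hypothetical):<pre>
+
+   Configuration conf = new Configuration();
+   FileSystem fs = FileSystem.get(conf);
+   MapFile.Reader reader = new MapFile.Reader(fs, "my.map", conf);
+   IntWritable key = new IntWritable(42);
+   Text val = new Text();
+   if (reader.get(key, val) != null) {
+     // val now holds the value stored under key 42
+   }
+   reader.close();
+ </pre>]]>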
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Reader -->
+ <!-- start class org.apache.hadoop.io.MapFile.Writer -->
+ <class name="MapFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <method name="getIndexInterval" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of entries that are added before an index entry is added.]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval and stores it in <code>conf</code>.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair to the map. The key must be greater than or
+ equal to the previous key added to the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writes a new map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Writer -->
+ <!-- start class org.apache.hadoop.io.MapWritable -->
+ <class name="MapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Map&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapWritable" type="org.apache.hadoop.io.MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.Writable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable Map from {@link Writable} keys to {@link Writable} values.
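+
+ <p>A minimal usage sketch (the key and value types are illustrative):<pre>
+
+   MapWritable map = new MapWritable();
+   map.put(new Text("count"), new IntWritable(1));
+   // serialize, then deserialize into another instance
+   map.write(out);       // out is an existing java.io.DataOutput
+   MapWritable copy = new MapWritable();
+   copy.readFields(in);  // in is a java.io.DataInput over the same bytes
+ </pre>]]>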
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapWritable -->
+ <!-- start class org.apache.hadoop.io.MD5Hash -->
+ <class name="MD5Hash" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash from a hex string.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash with a specified value.]]>
+ </doc>
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs, reads and returns an instance.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Copy the contents of another instance into this instance.]]>
+ </doc>
+ </method>
+ <method name="getDigest" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the digest bytes.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Construct a hash value for a UTF8 string.]]>
+ </doc>
+ </method>
+ <method name="halfDigest" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a half-sized version of this MD5 that fits in a long.]]>
+ </doc>
+ </method>
+ <method name="quarterDigest" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a 32-bit digest of the MD5.
+ @return the first 4 bytes of the md5]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an MD5Hash whose digest contains the
+ same values.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for this object.
+ Only uses the first 4 bytes, since md5s are evenly distributed.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares this object with the specified object for order.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="setDigest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the digest value from a hex string.]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Writable for MD5 hash values.
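+
+ <p>For illustration, computing and round-tripping a digest:<pre>
+
+   byte[] data = ... get data ...;
+   MD5Hash hash = MD5Hash.digest(data);
+   String hex = hash.toString();     // hex string form of the digest
+   MD5Hash same = new MD5Hash(hex);  // reconstruct from the hex string
+   // hash.equals(same) is true
+ </pre>]]>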
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash -->
+ <!-- start class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <class name="MD5Hash.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5Hash.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for MD5Hash keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <!-- start class org.apache.hadoop.io.MultipleIOException -->
+ <class name="MultipleIOException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getExceptions" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the underlying exceptions]]>
+ </doc>
+ </method>
+ <method name="createIOException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="exceptions" type="java.util.List&lt;java.io.IOException&gt;"/>
+ <doc>
+ <![CDATA[A convenient method to create an {@link IOException}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Encapsulates a list of {@link IOException}s in a single {@link IOException}.
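+
+ <p>For illustration (the failure list is hypothetical):<pre>
+
+   List&lt;IOException&gt; failures = new ArrayList&lt;IOException&gt;();
+   failures.add(new IOException("disk 1 failed"));
+   failures.add(new IOException("disk 2 failed"));
+   // returns a single IOException wrapping the whole list
+   throw MultipleIOException.createIOException(failures);
+ </pre>]]>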
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MultipleIOException -->
+ <!-- start class org.apache.hadoop.io.NullWritable -->
+ <class name="NullWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <method name="get" return="org.apache.hadoop.io.NullWritable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the single instance of this class.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Singleton Writable with no data.
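+
+ <p>A minimal usage sketch:<pre>
+
+   NullWritable nothing = NullWritable.get();  // the single shared instance
+ </pre>]]>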
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable -->
+ <!-- start class org.apache.hadoop.io.NullWritable.Comparator -->
+ <class name="NullWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator &quot;optimized&quot; for NullWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ObjectWritable -->
+ <class name="ObjectWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ObjectWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Class, java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the instance, or null if none.]]>
+ </doc>
+ </method>
+ <method name="getDeclaredClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the class this is meant to be.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Reset the instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeObject"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="instance" type="java.lang.Object"/>
+ <param name="declaredClass" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="objectWritable" type="org.apache.hadoop.io.ObjectWritable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A polymorphic Writable that writes an instance with its class name.
+ Handles arrays, strings and primitive types without a Writable wrapper.
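+
+ <p>For illustration, a sketch of the static helpers (<code>out</code>,
+ <code>in</code> and <code>conf</code> are assumed to exist):<pre>
+
+   // write a value along with enough type information to read it back
+   ObjectWritable.writeObject(out, "hello", String.class, conf);
+   // read it back without knowing the concrete type in advance
+   Object value = ObjectWritable.readObject(in, conf);
+ </pre>]]>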
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ObjectWritable -->
+ <!-- start class org.apache.hadoop.io.OutputBuffer -->
+ <class name="OutputBuffer" extends="java.io.FilterOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.OutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from an InputStream directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link OutputStream} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new OutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ OutputBuffer buffer = new OutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using OutputStream methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>
+ @see DataOutputBuffer
+ @see InputBuffer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.OutputBuffer -->
+ <!-- start interface org.apache.hadoop.io.RawComparator -->
+ <interface name="RawComparator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Comparator&lt;T&gt;"/>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link Comparator} that operates directly on byte representations of
+ objects.
+ </p>
+ @param <T>
+ @see DeserializerComparator]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.RawComparator -->
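+ <!-- A minimal RawComparator sketch (class name is illustrative): it compares
+      the serialized bytes directly, delegating to the compareBytes() helper on
+      WritableComparator in this package:
+
+        public class LexicographicRawComparator
+            implements RawComparator<BytesWritable> {
+          public int compare(byte[] b1, int s1, int l1,
+                             byte[] b2, int s2, int l2) {
+            // lexicographic comparison of the raw bytes, no deserialization
+            return WritableComparator.compareBytes(b1, s1, l1, b2, s2, l2);
+          }
+          public int compare(BytesWritable a, BytesWritable b) {
+            return a.compareTo(b);             // required by java.util.Comparator
+          }
+        }
+ -->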
+ <!-- start class org.apache.hadoop.io.SequenceFile -->
+ <class name="SequenceFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()}
+ to get {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the compression type for the reduce outputs.
+ @param job the job config to look in
+ @return the kind of compression to use
+ @deprecated Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()}
+ to get {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use the one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.
+ or">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the compression type for sequence files.
+ @param job the configuration to modify
+ @param val the new compression type (none, block, record)
+ @deprecated Use one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param bufferSize buffer size for the underlying output stream.
+ @param replication replication factor for the file.
+ @param blockSize block size for the file.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top of which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top of which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="SYNC_INTERVAL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes between sync points.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<code>SequenceFile</code>s are flat files consisting of binary key/value
+ pairs.
+
+ <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
+ {@link Sorter} classes for writing, reading and sorting respectively.</p>
+
+ There are three <code>SequenceFile</code> <code>Writer</code>s based on the
+ {@link CompressionType} used to compress key/value pairs:
+ <ol>
+ <li>
+ <code>Writer</code> : Uncompressed records.
+ </li>
+ <li>
+ <code>RecordCompressWriter</code> : Record-compressed files, only compress
+ values.
+ </li>
+ <li>
+ <code>BlockCompressWriter</code> : Block-compressed files, both keys &
+ values are collected in 'blocks'
+ separately and compressed. The size of
+ the 'block' is configurable.
+ </li>
+ </ol>
+
+ <p>The actual compression algorithm used to compress key and/or values can be
+ specified by using the appropriate {@link CompressionCodec}.</p>
+
+ <p>The recommended way is to use the static <tt>createWriter</tt> methods
+ provided by the <code>SequenceFile</code> to choose the preferred format.</p>
+
+ <p>The {@link Reader} acts as the bridge and can read any of the above
+ <code>SequenceFile</code> formats.</p>
+
+ <h4 id="Formats">SequenceFile Formats</h4>
+
+ <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
+ depending on the <code>CompressionType</code> specified. All of them share a
+ <a href="#Header">common header</a> described below.
+
+ <h5 id="Header">SequenceFile Header</h5>
+ <ul>
+ <li>
+ version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual
+ version number (e.g. SEQ4 or SEQ6)
+ </li>
+ <li>
+ keyClassName - key class
+ </li>
+ <li>
+ valueClassName - value class
+ </li>
+ <li>
+ compression - A boolean which specifies if compression is turned on for
+ keys/values in this file.
+ </li>
+ <li>
+ blockCompression - A boolean which specifies if block-compression is
+ turned on for keys/values in this file.
+ </li>
+ <li>
+ compression codec - <code>CompressionCodec</code> class which is used for
+ compression of keys and/or values (if compression is
+ enabled).
+ </li>
+ <li>
+ metadata - {@link Metadata} for this file.
+ </li>
+ <li>
+ sync - A sync marker to denote end of the header.
+ </li>
+ </ul>
+
+ <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li>Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li><i>Compressed</i> Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record <i>Block</i>
+ <ul>
+ <li>Compressed key-lengths block-size</li>
+ <li>Compressed key-lengths block</li>
+ <li>Compressed keys block-size</li>
+ <li>Compressed keys block</li>
+ <li>Compressed value-lengths block-size</li>
+ <li>Compressed value-lengths block</li>
+ <li>Compressed values block-size</li>
+ <li>Compressed values block</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <p>The compressed blocks of key lengths and value lengths consist of the
+ actual lengths of individual keys/values encoded in ZeroCompressedInteger
+ format.</p>
+
+ @see CompressionCodec]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile -->
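+ <!-- A short writer sketch using the createWriter overload documented above
+      (the path is hypothetical):
+
+        Configuration conf = new Configuration();
+        FileSystem fs = FileSystem.get(conf);
+        Path file = new Path("/tmp/pairs.seq");
+        SequenceFile.Writer writer = SequenceFile.createWriter(
+            fs, conf, file, Text.class, IntWritable.class,
+            SequenceFile.CompressionType.BLOCK);   // keys and values compressed in blocks
+        try {
+          writer.append(new Text("alpha"), new IntWritable(1));
+        } finally {
+          writer.close();                          // always close the writer
+        }
+ -->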
+ <!-- start class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <class name="SequenceFile.CompressionType" extends="java.lang.Enum&lt;org.apache.hadoop.io.SequenceFile.CompressionType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.SequenceFile.CompressionType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression type used to compress key/value pairs in the
+ {@link SequenceFile}.
+
+ @see SequenceFile.Writer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.Metadata -->
+ <class name="SequenceFile.Metadata" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFile.Metadata" type="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="getMetadata" return="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The class encapsulating the metadata of a file.
+ The metadata of a file is a list of attribute name/value
+ pairs of Text type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Metadata -->
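+ <!-- A small sketch of building and querying Metadata (attribute names are
+      illustrative):
+
+        TreeMap<Text, Text> attrs = new TreeMap<Text, Text>();
+        attrs.put(new Text("creator"), new Text("demo"));
+        SequenceFile.Metadata meta = new SequenceFile.Metadata(attrs);
+        meta.set(new Text("version"), new Text("1"));
+        Text creator = meta.get(new Text("creator"));   // null if the name was never set
+ -->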
+ <!-- start class org.apache.hadoop.io.SequenceFile.Reader -->
+ <class name="SequenceFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Reader" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the named file.]]>
+ </doc>
+ </constructor>
+ <method name="openFile" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link FSDataInputStream} returned.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the key class.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the value class.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="isCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if values are compressed.]]>
+ </doc>
+ </method>
+ <method name="isBlockCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if records are block-compressed.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="getMetadata" return="org.apache.hadoop.io.SequenceFile.Metadata"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the metadata object of the file.]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file into <code>key</code>, skipping its
+ value. Returns true if another entry exists and false at end of file.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the file into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ end of file.]]>
+ </doc>
+ </method>
+ <method name="next" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.">
+ <param name="buffer" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.]]>
+ </doc>
+ </method>
+ <method name="createValueBytes" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRaw" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' records.
+ @param key - The buffer into which the key is read
+ @param val - The 'raw' value
+ @return Returns the total record length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawKey" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' keys.
+ @param key - The buffer into which the key is read
+ @return Returns the key length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file, skipping its
+ value. Return null at end of file.]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' values.
+ @param val - The 'raw' value
+ @return Returns the value length
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the current byte position in the input file.
+
+ <p>The position passed must be a position returned by {@link
+ SequenceFile.Writer#getLength()} when writing this file. To seek to an arbitrary
+ position, use {@link SequenceFile.Reader#sync(long)}.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the next sync mark past a given position.]]>
+ </doc>
+ </method>
+ <method name="syncSeen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true iff the previous call to next passed a sync mark.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current byte position in the input file.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reads key/value pairs from a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Reader -->
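+ <!-- A minimal read loop; a sketch that assumes the file was written with Text
+      keys and IntWritable values ('fs', 'file' and 'conf' as in the writer
+      sketch above):
+
+        SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
+        try {
+          Text key = new Text();
+          IntWritable val = new IntWritable();
+          while (reader.next(key, val)) {      // false at end of file
+            System.out.println(key + "\t" + val);
+          }
+        } finally {
+          reader.close();
+        }
+ -->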
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter -->
+ <class name="SequenceFile.Sorter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge files containing the named classes.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.io.RawComparator, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge using an arbitrary {@link RawComparator}.]]>
+ </doc>
+ </constructor>
+ <method name="setFactor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="factor" type="int"/>
+ <doc>
+ <![CDATA[Set the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="getFactor" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="setMemory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="memory" type="int"/>
+ <doc>
+ <![CDATA[Set the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="getMemory" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setProgressable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progressable" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Set the progressable object in order to report progress.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files into an output file.
+ @param inFiles the files to be sorted
+ @param outFile the sorted output file
+ @param deleteInput should the input files be deleted as they are read?]]>
+ </doc>
+ </method>
+ <method name="sortAndIterate" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files and return an iterator.
+ @param inFiles the files to be sorted
+ @param tempDir the directory where temp files are created during sort
+ @param deleteInput should the input files be deleted as they are read?
+ @return the RawKeyValueIterator]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The backwards compatible interface to sort.
+ @param inFile the input file to sort
+ @param outFile the sorted output file]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="segments" type="java.util.List&lt;org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor&gt;"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the list of segments of type <code>SegmentDescriptor</code>.
+ @param segments the list of SegmentDescriptors
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[] using a max factor value
+ that is already set.
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="factor" type="int"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[].
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param factor the factor that will be used as the maximum merge fan-in
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInputs" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[].
+ @param inNames the array of path names
+ @param tempDir the directory for creating temp files during merge
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cloneFileAttributes" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="prog" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clones the attributes (like compression) of the input file and creates a
+ corresponding Writer.
+ @param inputFile the path of the input file whose attributes should be
+ cloned
+ @param outputFile the path of the output file
+ @param prog the Progressable to report status during the file write
+ @return Writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="records" type="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"/>
+ <param name="writer" type="org.apache.hadoop.io.SequenceFile.Writer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes records from RawKeyValueIterator into a file represented by the
+ passed writer.
+ @param records the RawKeyValueIterator
+ @param writer the Writer created earlier
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merge the provided files.
+ @param inFiles the array of input path names
+ @param outFile the final output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sorts key/value pairs in a sequence-format file.
+
+ <p>For best performance, applications should make sure that the {@link
+ Writable#readFields(DataInput)} implementation of their keys is
+ very efficient. In particular, it should avoid allocating memory.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter -->
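+ <!-- A short sort sketch ('fs' and 'conf' as above; all paths are
+      hypothetical):
+
+        SequenceFile.Sorter sorter =
+            new SequenceFile.Sorter(fs, Text.class, IntWritable.class, conf);
+        sorter.setFactor(10);                  // merge at most 10 streams at once
+        sorter.setMemory(4 * 1024 * 1024);     // 4 MB of buffer memory
+        Path[] inputs = { new Path("/tmp/a.seq"), new Path("/tmp/b.seq") };
+        sorter.sort(inputs, new Path("/tmp/sorted.seq"), true);   // true: delete inputs as read
+ -->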
+ <!-- start interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
+ <interface name="SequenceFile.Sorter.RawKeyValueIterator" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw key.
+ @return DataOutputBuffer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getValue" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw value.
+ @return ValueBytes
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets up the current key and value (for getKey and getValue).
+ @return true if there exists a key/value, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the iterator so that the underlying streams can be closed.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the Progress object; this has a float (0.0 - 1.0)
+ indicating the fraction of the bytes processed by the iterator so far.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to iterate over raw keys/values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
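+ <!-- A consumption sketch for the iterator returned by sortAndIterate()
+      ('sorter' and 'inputs' as in the previous sketch; the temp directory is
+      hypothetical):
+
+        SequenceFile.Sorter.RawKeyValueIterator it =
+            sorter.sortAndIterate(inputs, new Path("/tmp"), true);
+        while (it.next()) {                    // positions getKey() and getValue()
+          DataOutputBuffer rawKey = it.getKey();
+          SequenceFile.ValueBytes rawVal = it.getValue();
+          // consume the raw key/value bytes here
+        }
+        it.close();                            // lets the underlying streams be closed
+ -->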
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <class name="SequenceFile.Sorter.SegmentDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="SequenceFile.Sorter.SegmentDescriptor" type="long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a segment.
+ @param segmentOffset the offset of the segment in the file
+ @param segmentLength the length of the segment
+ @param segmentPathName the path name of the file containing the segment]]>
+ </doc>
+ </constructor>
+ <method name="doSync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do the sync checks.]]>
+ </doc>
+ </method>
+ <method name="preserveInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="preserve" type="boolean"/>
+ <doc>
+ <![CDATA[Sets whether the segment's input file should be preserved
+ (that is, not deleted) when it is no longer needed.]]>
+ </doc>
+ </method>
+ <method name="shouldPreserveInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRawKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the rawKey object with the key returned by the Reader.
+ @return true if there is a key returned; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rawValue" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the passed rawValue with the value corresponding to the key
+ read earlier.
+ @param rawValue
+ @return the length of the value
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the stored rawKey.]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The default cleanup. Subclasses can override this with a custom
+ cleanup.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class defines a merge segment. It can be subclassed to
+ provide a customized cleanup method implementation; in the default
+ implementation, cleanup closes the file handle and deletes the file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <!-- start interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
+ <interface name="SequenceFile.ValueBytes" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the uncompressed bytes to the outStream.
+ @param outStream : Stream to write uncompressed bytes into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to outStream.
+ Note that it will NOT compress the bytes if they are not already compressed.
+ @param outStream : Stream to write compressed bytes into.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Size of stored data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to 'raw' values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
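+ <!-- A sketch pairing ValueBytes with the Reader's raw API ('reader' as in the
+      read-loop sketch above; the destination file name is hypothetical):
+
+        DataOutputBuffer rawKey = new DataOutputBuffer();
+        SequenceFile.ValueBytes rawVal = reader.createValueBytes();
+        DataOutputStream dest =
+            new DataOutputStream(new FileOutputStream("values.bin"));
+        while (reader.nextRaw(rawKey, rawVal) != -1) {   // -1 at end of file
+          rawVal.writeUncompressedBytes(dest);           // uncompressed form of each value
+          rawKey.reset();                                // reuse the key buffer
+        }
+        dest.close();
+ -->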
+ <!-- start class org.apache.hadoop.io.SequenceFile.Writer -->
+ <class name="SequenceFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, int, short, long, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a sync point.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="appendRaw"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keyData" type="byte[]"/>
+ <param name="keyOffset" type="int"/>
+ <param name="keyLength" type="int"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current length of the output file.
+
+ <p>This always returns a synchronized position. In other words,
+ immediately after calling {@link SequenceFile.Reader#seek(long)} with a position
+ returned by this method, {@link SequenceFile.Reader#next(Writable)} may be called. However,
+ the key may be earlier in the file than the key last written when this
+ method was called (e.g., with block-compression, it may be the first key
+ in the block that was being written when this method was called).]]>
+ </doc>
+ </method>
+ <field name="keySerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="uncompressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="compressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Write key/value pairs to a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Writer -->
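+ <!-- A sketch of the getLength()/seek() contract described above: positions
+      returned by Writer.getLength() are the ones Reader.seek(long) accepts
+      ('writer', 'fs', 'file' and 'conf' as in the earlier sketches):
+
+        long pos = writer.getLength();         // a synchronized position
+        writer.append(new Text("k"), new IntWritable(42));
+        writer.close();
+
+        SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
+        reader.seek(pos);                      // valid because pos came from getLength()
+        Text key = new Text();
+        reader.next(key);   // may be earlier than the last written key (block compression)
+ -->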
+ <!-- start class org.apache.hadoop.io.SetFile -->
+ <class name="SetFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A file-based set of keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile -->
+ <!-- start class org.apache.hadoop.io.SetFile.Reader -->
+ <class name="SetFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set using the named comparator.]]>
+ </doc>
+ </constructor>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in a set into <code>key</code>. Returns
+ true if such a key exists and false when at the end of the set.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the matching key from a set into <code>key</code>.
+ Returns <code>key</code>, or null if no match exists.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Reader -->
+ <!-- start class org.apache.hadoop.io.SetFile.Writer -->
+ <class name="SetFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="pass a Configuration too">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named set for keys of the named class.
+ @deprecated pass a Configuration too]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element class and compression type.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element comparator and compression type.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key to a set. The key must be strictly greater than the
+ previous key added to the set.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Writer -->
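+ <!-- Usage sketch for SetFile (illustrative, not part of the generated API
+ description; assumes the usual org.apache.hadoop imports): the directory
+ name is an assumption, and keys must be appended in strictly increasing
+ order.
+
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.getLocal(conf);
+ String dir = "/tmp/myset";                       // illustrative directory
+ SetFile.Writer writer = new SetFile.Writer(conf, fs, dir,
+     Text.class, SequenceFile.CompressionType.NONE);
+ writer.append(new Text("apple"));
+ writer.append(new Text("banana"));               // strictly greater than "apple"
+ writer.close();
+
+ SetFile.Reader reader = new SetFile.Reader(fs, dir, conf);
+ Text key = new Text();
+ while (reader.next(key)) {                       // iterate keys in order
+   System.out.println(key);
+ }
+ reader.close();
+ -->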
+ <!-- start class org.apache.hadoop.io.SortedMapWritable -->
+ <class name="SortedMapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="SortedMapWritable" type="org.apache.hadoop.io.SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="comparator" return="java.util.Comparator&lt;? super org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="firstKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="headMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="lastKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="subMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="tailMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.WritableComparable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable SortedMap.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SortedMapWritable -->
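+ <!-- Usage sketch for SortedMapWritable (illustrative; assumes the usual
+ org.apache.hadoop.io imports): keys are kept in sorted order and the whole
+ map can be serialized via write/readFields.
+
+ SortedMapWritable map = new SortedMapWritable();
+ map.put(new IntWritable(3), new Text("three"));
+ map.put(new IntWritable(1), new Text("one"));
+ map.put(new IntWritable(2), new Text("two"));
+ System.out.println(map.firstKey());              // prints 1
+ System.out.println(map.lastKey());               // prints 3
+ -->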
+ <!-- start interface org.apache.hadoop.io.Stringifier -->
+ <interface name="Stringifier" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Converts the object to a string representation.
+ @param obj the object to convert
+ @return the string representation of the object
+ @throws IOException if the object cannot be converted]]>
+ </doc>
+ </method>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from its string representation.
+ @param str the string representation of the object
+ @return restored object
+ @throws IOException if the object cannot be restored]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes this object.
+ @throws IOException if an I/O error occurs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The Stringifier interface offers two methods: one to convert an
+ object to a string representation, and one to restore the object from its
+ string representation.
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Stringifier -->
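+ <!-- Implementation sketch for Stringifier (illustrative;
+ IntWritableStringifier is a hypothetical helper, not part of Hadoop):
+ stringify an IntWritable via its decimal representation.
+
+ import java.io.IOException;
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.Stringifier;
+
+ public class IntWritableStringifier implements Stringifier<IntWritable> {
+   public String toString(IntWritable obj) throws IOException {
+     return Integer.toString(obj.get());          // decimal representation
+   }
+   public IntWritable fromString(String str) throws IOException {
+     try {
+       return new IntWritable(Integer.parseInt(str));
+     } catch (NumberFormatException e) {
+       throw new IOException("cannot restore IntWritable from: " + str);
+     }
+   }
+   public void close() throws IOException {
+     // no resources to release
+   }
+ }
+ -->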
+ <!-- start class org.apache.hadoop.io.Text -->
+ <class name="Text" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Text" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a string.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="org.apache.hadoop.io.Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another text.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a byte array.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the raw bytes; however, only data up to {@link #getLength()} is
+ valid.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of bytes in the byte array.]]>
+ </doc>
+ </method>
+ <method name="charAt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="int"/>
+ <doc>
+ <![CDATA[Returns the Unicode Scalar Value (32-bit integer value)
+ for the character at <code>position</code>. Note that this
+ method avoids using the converter or doing String instantiation.
+ @return the Unicode scalar value at position or -1
+ if the position is invalid or points to a
+ trailing byte]]>
+ </doc>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Finds any occurrence of <code>what</code> in the backing
+ buffer, starting at position <code>start</code>. The starting
+ position is measured in bytes and the return value is in
+ terms of byte position in the buffer. The backing buffer is
+ not converted to a string for this operation.
+ @return byte position of the first occurrence of the search
+ string in the UTF-8 buffer or -1 if not found]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <doc>
+ <![CDATA[Set to a UTF-8 byte array.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[Copy a text.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Set the Text to a range of bytes.
+ @param utf8 the data to copy from
+ @param start the first position of the new string
+ @param len the number of bytes of the new string]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Append a range of bytes to the end of the given text.
+ @param utf8 the data to copy from
+ @param start the first position to append from utf8
+ @param len the number of bytes to append]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clear the string to empty.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert the text back to a String.
+ @see java.lang.Object#toString()]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[deserialize]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one Text in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize: write this object to <code>out</code>;
+ the length is written using zero-compressed encoding.
+ @see Writable#write(DataOutput)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare two Texts bytewise using standard UTF8 ordering.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a Text with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[hash function]]>
+ </doc>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If the input is malformed,
+ it is replaced with a default value.]]>
+ </doc>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If the input is malformed,
+ invalid chars are replaced by a default value.
+ @return ByteBuffer: bytes stored at ByteBuffer.array()
+ and length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.
+ @return ByteBuffer: bytes stored at ByteBuffer.array()
+ and length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string from <code>in</code>.]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF-8 encoded string to <code>out</code>.]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check if a byte array contains valid utf-8
+ @param utf8 byte array
+ @throws MalformedInputException if the byte array contains invalid utf-8]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check to see if a byte array is valid utf-8
+ @param utf8 the array of bytes
+ @param start the offset of the first byte in the array
+ @param len the length of the byte sequence
+ @throws MalformedInputException if the byte array contains invalid bytes]]>
+ </doc>
+ </method>
+ <method name="bytesToCodePoint" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="java.nio.ByteBuffer"/>
+ <doc>
+ <![CDATA[Returns the next code point at the current position in
+ the buffer. The buffer's position will be incremented.
+ Any mark set on this buffer will be changed by this method!]]>
+ </doc>
+ </method>
+ <method name="utf8Length" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[For the given string, returns the number of UTF-8 bytes
+ required to encode the string.
+ @param string text to encode
+ @return number of UTF-8 bytes required to encode]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class stores text using standard UTF8 encoding. It provides methods
+ to serialize, deserialize, and compare texts at byte level. The type of
+ length is integer and is serialized using zero-compressed format. <p>In
+ addition, it provides methods for string traversal without converting the
+ byte array to a string. <p>Also includes utilities for
+ serializing/deserializing a string, coding/decoding a string, checking if a
+ byte array contains valid UTF8 code, and calculating the length of an encoded
+ string.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text -->
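+ <!-- Usage sketch for Text (illustrative; assumes the usual
+ org.apache.hadoop.io imports): byte-level access without String conversion,
+ plus in-place reuse via set().
+
+ Text t = new Text("hadoop");
+ System.out.println(t.getLength());   // 6: number of UTF-8 bytes
+ System.out.println(t.find("do"));    // 2: byte offset of the match
+ System.out.println(t.charAt(0));     // 104: Unicode scalar value of 'h'
+ t.set("h\u00e9llo");                 // reuses the backing buffer
+ System.out.println(t.getLength());   // 6: U+00E9 encodes as two bytes
+ -->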
+ <!-- start class org.apache.hadoop.io.Text.Comparator -->
+ <class name="Text.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Text.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for Text keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text.Comparator -->
+ <!-- start class org.apache.hadoop.io.TwoDArrayWritable -->
+ <class name="TwoDArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[][]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[][]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for 2D arrays containing a matrix of instances of a class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.TwoDArrayWritable -->
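+ <!-- Usage sketch for TwoDArrayWritable (illustrative; assumes the usual
+ org.apache.hadoop.io imports): wrap a 2x2 matrix of IntWritables for
+ serialization.
+
+ IntWritable[][] values = new IntWritable[][] {
+   { new IntWritable(1), new IntWritable(2) },
+   { new IntWritable(3), new IntWritable(4) }
+ };
+ TwoDArrayWritable m = new TwoDArrayWritable(IntWritable.class, values);
+ Writable[][] back = m.get();         // the same matrix, as Writable[][]
+ -->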
+ <!-- start class org.apache.hadoop.io.UTF8 -->
+ <class name="UTF8" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="replaced by Text">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UTF8" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <constructor name="UTF8" type="org.apache.hadoop.io.UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another UTF8.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw bytes.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the encoded string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Set to contain the contents of another UTF8.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one UTF8 in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare two UTF8s.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert to a String.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a UTF8 with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convert a string to a UTF-8 encoded byte array.
+ @see String#getBytes(String)]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string.
+
+ @see DataInput#readUTF()]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF-8 encoded string.
+
+ @see DataOutput#writeUTF(String)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for strings that uses the UTF8 encoding.
+
+ <p>Also includes utilities for efficiently reading and writing UTF-8.
+
+ @deprecated replaced by Text]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8 -->
+ <!-- start class org.apache.hadoop.io.UTF8.Comparator -->
+ <class name="UTF8.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UTF8.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8.Comparator -->
+ <!-- start class org.apache.hadoop.io.VersionedWritable -->
+ <class name="VersionedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="VersionedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="byte"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the version number of the current implementation.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for Writables that provides version checking.
+
+ <p>This is useful when a class may evolve, so that instances written by the
+ old version of the class may still be processed by the new version. To
+ handle this situation, {@link #readFields(DataInput)}
+ implementations should catch {@link VersionMismatchException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionedWritable -->
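+ <!-- Implementation sketch for VersionedWritable (illustrative; MyRecord and
+ its field are assumptions): bump VERSION whenever the wire format changes.
+
+ import java.io.DataInput;
+ import java.io.DataOutput;
+ import java.io.IOException;
+ import org.apache.hadoop.io.VersionedWritable;
+
+ public class MyRecord extends VersionedWritable {
+   private static final byte VERSION = 1;
+   private int counter;
+
+   public byte getVersion() {
+     return VERSION;
+   }
+   public void write(DataOutput out) throws IOException {
+     super.write(out);                // writes the version byte first
+     out.writeInt(counter);
+   }
+   public void readFields(DataInput in) throws IOException {
+     super.readFields(in);            // throws VersionMismatchException on skew
+     counter = in.readInt();
+   }
+ }
+ -->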
+ <!-- start class org.apache.hadoop.io.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="byte, byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Thrown by {@link VersionedWritable#readFields(DataInput)} when the
+ version of an object being read does not match the current implementation
+ version as returned by {@link VersionedWritable#getVersion()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionMismatchException -->
+ <!-- start class org.apache.hadoop.io.VIntWritable -->
+ <class name="VIntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VIntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VIntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VIntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VIntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for integer values stored in variable-length format.
+ Such values take between one and five bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVInt(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VIntWritable -->
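+ <!-- Usage sketch for VIntWritable (illustrative; assumes the usual
+ org.apache.hadoop.io imports): the serialized size depends on the magnitude
+ of the value, as WritableUtils.getVIntSize shows.
+
+ VIntWritable small = new VIntWritable(42);
+ VIntWritable large = new VIntWritable(1000000);
+ System.out.println(WritableUtils.getVIntSize(42));       // 1 byte
+ System.out.println(WritableUtils.getVIntSize(1000000));  // 4 bytes (1 prefix + 3 data)
+ -->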
+ <!-- start class org.apache.hadoop.io.VLongWritable -->
+ <class name="VLongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VLongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VLongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VLongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VLongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs in a variable-length format. Such values take
+ between one and nine bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVLong(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VLongWritable -->
+ <!-- start interface org.apache.hadoop.io.Writable -->
+ <interface name="Writable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the fields of this object to <code>out</code>.
+
+ @param out <code>DataOutput</code> to serialize this object into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the fields of this object from <code>in</code>.
+
+ <p>For efficiency, implementations should attempt to re-use storage in the
+ existing object where possible.</p>
+
+ @param in <code>DataInput</code> to deserialize this object from.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A serializable object which implements a simple, efficient, serialization
+ protocol, based on {@link DataInput} and {@link DataOutput}.
+
+ <p>Any <code>key</code> or <code>value</code> type in the Hadoop Map-Reduce
+ framework implements this interface.</p>
+
+ <p>Implementations typically implement a static <code>read(DataInput)</code>
+ method which constructs a new instance, calls {@link #readFields(DataInput)}
+ and returns the instance.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritable implements Writable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public static MyWritable read(DataInput in) throws IOException {
+ MyWritable w = new MyWritable();
+ w.readFields(in);
+ return w;
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Writable -->
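+ <!-- Usage sketch for Writable (illustrative): round-trip the MyWritable
+ example above through an in-memory stream; "before" is an assumed,
+ already-populated instance.
+
+ import java.io.*;
+
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ before.write(new DataOutputStream(bos));                  // serialize
+ DataInputStream in =
+     new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
+ MyWritable after = MyWritable.read(in);                   // deserialize
+ -->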
+ <!-- start interface org.apache.hadoop.io.WritableComparable -->
+ <interface name="WritableComparable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable&lt;T&gt;"/>
+ <doc>
+ <![CDATA[A {@link Writable} which is also {@link Comparable}.
+
+ <p><code>WritableComparable</code>s can be compared to each other, typically
+ via <code>Comparator</code>s. Any type which is to be used as a
+ <code>key</code> in the Hadoop Map-Reduce framework should implement this
+ interface.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritableComparable implements WritableComparable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public int compareTo(MyWritableComparable w) {
+ int thisValue = this.counter;
+ int thatValue = w.counter;
+ return (thisValue &lt; thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableComparable -->
+ <!-- start class org.apache.hadoop.io.WritableComparator -->
+ <class name="WritableComparator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator"/>
+ <constructor name="WritableComparator" type="java.lang.Class"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </constructor>
+ <constructor name="WritableComparator" type="java.lang.Class, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get a comparator for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link WritableComparable}
+ implementation.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the WritableComparable implementation class.]]>
+ </doc>
+ </method>
+ <method name="newKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a new {@link WritableComparable} instance.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Optimization hook. Override this to make SequenceFile.Sorter scream.
+
+ <p>The default implementation reads the data into two {@link
+ WritableComparable}s (using {@link
+ Writable#readFields(DataInput)}), then calls {@link
+ #compare(WritableComparable,WritableComparable)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[Compare two WritableComparables.
+
+ <p> The default implementation uses the natural ordering, calling {@link
+ Comparable#compareTo(Object)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <method name="hashBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Compute hash for binary data.]]>
+ </doc>
+ </method>
+ <method name="readUnsignedShort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an unsigned short from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an integer from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a long from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array containing the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator for {@link WritableComparable}s.
+
+ <p>This base implemenation uses the natural ordering. To define alternate
+ orderings, override {@link #compare(WritableComparable,WritableComparable)}.
+
+ <p>One may optimize compare-intensive operations by overriding
+ {@link #compare(byte[],int,int,byte[],int,int)}. Static utility methods are
+ provided to assist in optimized implementations of this method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableComparator -->
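+ <!-- Implementation sketch for an optimized raw comparator (illustrative;
+ assumes the usual org.apache.hadoop.io imports): compare serialized
+ IntWritables directly with the static readInt helper above, skipping
+ deserialization entirely.
+
+ public class FastIntComparator extends WritableComparator {
+   public FastIntComparator() {
+     super(IntWritable.class);
+   }
+   public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
+     int thisValue = readInt(b1, s1);
+     int thatValue = readInt(b2, s2);
+     return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
+   }
+ }
+
+ // Typically registered once, e.g. in a static initializer:
+ // WritableComparator.define(IntWritable.class, new FastIntComparator());
+ -->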
+ <!-- start class org.apache.hadoop.io.WritableFactories -->
+ <class name="WritableFactories" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="factory" type="org.apache.hadoop.io.WritableFactory"/>
+ <doc>
+ <![CDATA[Define a factory for a class.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.io.WritableFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+      <![CDATA[Get the factory defined for a class.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factories for non-public writables. Defining a factory permits {@link
+ ObjectWritable} to be able to construct instances of non-public classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableFactories -->
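+  <!-- A minimal sketch of the factory registration this class enables,
+       assuming a hypothetical package-private InternalRecord; the static
+       initializer registers the factory once so ObjectWritable can
+       instantiate the class even though it is not public.
+
+       class InternalRecord implements org.apache.hadoop.io.Writable {
+         static {
+           org.apache.hadoop.io.WritableFactories.setFactory(
+               InternalRecord.class,
+               new org.apache.hadoop.io.WritableFactory() {
+                 public org.apache.hadoop.io.Writable newInstance() {
+                   return new InternalRecord();
+                 }
+               });
+         }
+         public void write(java.io.DataOutput out) throws java.io.IOException {}
+         public void readFields(java.io.DataInput in) throws java.io.IOException {}
+       }
+  -->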
+ <!-- start interface org.apache.hadoop.io.WritableFactory -->
+ <interface name="WritableFactory" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a new instance.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A factory for a class of Writable.
+ @see WritableFactories]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableFactory -->
+ <!-- start class org.apache.hadoop.io.WritableName -->
+ <class name="WritableName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the name that a class should be known as to something other than the
+ class name.]]>
+ </doc>
+ </method>
+ <method name="addName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add an alternate name for a class.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Return the name for a class. Default is {@link Class#getName()}.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the class for a name. Default is {@link Class#forName(String)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility to permit renaming of Writable implementation classes without
+   invalidating files that contain their class name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableName -->
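+  <!-- A minimal sketch of the renaming support described above, assuming a
+       hypothetical class com.example.NewRecord that used to be called
+       com.example.OldRecord; the alias keeps files written under the old
+       name readable, while setName fixes a stable logical name for new
+       files.
+
+       WritableName.setName(NewRecord.class, "example.Record");
+       WritableName.addName(NewRecord.class, "com.example.OldRecord");
+  -->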
+ <!-- start class org.apache.hadoop.io.WritableUtils -->
+ <class name="WritableUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="WritableUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readCompressedByteArray" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skipCompressedByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedByteArray" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="bytes" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="displayByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="record" type="byte[]"/>
+ </method>
+ <method name="clone" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="orig" type="org.apache.hadoop.io.Writable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Make a copy of a writable object using serialization to a buffer.
+ @param orig The object to copy
+ @return The copied object]]>
+ </doc>
+ </method>
+ <method name="cloneInto"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.io.Writable"/>
+ <param name="src" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Make a copy of a writable object using serialization to a buffer.
+   @param dst the object to copy into, whose previous contents are destroyed
+   @param src the object to copy from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an integer to a binary stream with zero-compressed encoding.
+ For -120 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ integer is positive or negative, and the number of bytes that follow.
+   If the first byte value v is between -121 and -124, the following integer
+   is positive, and the number of bytes that follow is -(v+120).
+   If the first byte value v is between -125 and -128, the following integer
+   is negative, and the number of bytes that follow is -(v+124). Bytes are
+ stored in the high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Integer to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ long is positive or negative, and the number of bytes that follow.
+   If the first byte value v is between -113 and -120, the following long
+   is positive, and the number of bytes that follow is -(v+112).
+   If the first byte value v is between -121 and -128, the following long
+   is negative, and the number of bytes that follow is -(v+120). Bytes are
+ stored in the high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized long from stream.]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized integer from stream.]]>
+ </doc>
+ </method>
+ <method name="isNegativeVInt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+      <![CDATA[Given the first byte of a vint/vlong, determine its sign.
+   @param value the first byte
+   @return true if the encoded value is negative]]>
+ </doc>
+ </method>
+ <method name="decodeVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Parse the first byte of a vint/vlong to determine the number of bytes
+ @param value the first byte of the vint/vlong
+ @return the total number of bytes (1 to 9)]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+      <![CDATA[Get the encoded length when an integer is stored in a variable-length format.
+ @return the encoded length]]>
+ </doc>
+ </method>
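+  <!-- A minimal round-trip sketch of the zero-compressed encoding described
+       above, using DataOutputBuffer/DataInputBuffer from this package.
+       The byte counts in the comments follow the documented scheme: small
+       values occupy a single byte, larger ones a marker byte plus the
+       value bytes.
+
+       DataOutputBuffer out = new DataOutputBuffer();
+       WritableUtils.writeVInt(out, 100);    // one byte: the value itself
+       WritableUtils.writeVInt(out, 1000);   // marker byte, then 0x03 0xE8
+
+       DataInputBuffer in = new DataInputBuffer();
+       in.reset(out.getData(), out.getLength());
+       int a = WritableUtils.readVInt(in);   // 100
+       int b = WritableUtils.readVInt(in);   // 1000
+       int n = WritableUtils.getVIntSize(1000);  // encoded length, here 3
+  -->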
+ <method name="readEnum" return="T extends java.lang.Enum&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="enumType" type="java.lang.Class&lt;T&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read an Enum value from DataInput. Enums are read and written
+   using their String values.
+ @param <T> Enum type
+ @param in DataInput to read from
+ @param enumType Class type of Enum
+ @return Enum represented by String read from DataInput
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeEnum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="enumVal" type="java.lang.Enum"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Writes the String value of an enum to DataOutput.
+   @param out DataOutput stream
+ @param enumVal enum value
+ @throws IOException]]>
+ </doc>
+ </method>
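+  <!-- A minimal sketch of the String-based enum serialization described
+       above; Color is a hypothetical enum, and the buffers come from this
+       package.
+
+       enum Color { RED, GREEN }
+
+       DataOutputBuffer out = new DataOutputBuffer();
+       WritableUtils.writeEnum(out, Color.GREEN);   // writes "GREEN"
+       DataInputBuffer in = new DataInputBuffer();
+       in.reset(out.getData(), out.getLength());
+       Color c = WritableUtils.readEnum(in, Color.class);
+  -->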
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Skip <i>len</i> bytes in the input stream <i>in</i>.
+   @param in input stream
+   @param len number of bytes to skip
+   @throws IOException if fewer than <i>len</i> bytes could be skipped]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableUtils -->
+</package>
+<package name="org.apache.hadoop.io.compress">
+ <!-- start class org.apache.hadoop.io.compress.CodecPool -->
+ <class name="CodecPool" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CodecPool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Compressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Compressor</code>
+ @return <code>Compressor</code> for the given
+ <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="getDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Decompressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Decompressor</code>
+ @return <code>Decompressor</code> for the given
+   <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="returnCompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <doc>
+ <![CDATA[Return the {@link Compressor} to the pool.
+
+ @param compressor the <code>Compressor</code> to be returned to the pool]]>
+ </doc>
+ </method>
+ <method name="returnDecompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <doc>
+ <![CDATA[Return the {@link Decompressor} to the pool.
+
+ @param decompressor the <code>Decompressor</code> to be returned to the
+ pool]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A global compressor/decompressor pool used to save and reuse
+ (possibly native) compression/decompression codecs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CodecPool -->
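+  <!-- A minimal sketch of the borrow/return discipline the pool expects,
+       assuming a hypothetical destination stream 'fileOut'; the codec is
+       built through ReflectionUtils so it is properly configured.
+
+       Configuration conf = new Configuration();
+       CompressionCodec codec =
+           ReflectionUtils.newInstance(GzipCodec.class, conf);
+       Compressor compressor = CodecPool.getCompressor(codec);
+       try {
+         CompressionOutputStream out =
+             codec.createOutputStream(fileOut, compressor);
+         // ... write data, then out.finish() ...
+       } finally {
+         CodecPool.returnCompressor(compressor);  // always return it
+       }
+  -->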
+ <!-- start interface org.apache.hadoop.io.compress.CompressionCodec -->
+ <interface name="CompressionCodec" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream}.
+
+ @param out the location for the final output stream
+ @return a stream the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream} with the given {@link Compressor}.
+
+ @param out the location for the final output stream
+ @param compressor compressor to use
+ @return a stream the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
+
+ @return the type of compressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Compressor} for use by this {@link CompressionCodec}.
+
+ @return a new compressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a stream decompressor that will read from the given input stream.
+
+ @param in the stream to read compressed bytes from
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionInputStream} that will read from the given
+ {@link InputStream} with the given {@link Decompressor}.
+
+ @param in the stream to read compressed bytes from
+ @param decompressor decompressor to use
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
+
+ @return the type of decompressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
+
+ @return a new decompressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a streaming compression/decompression pair.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.CompressionCodec -->
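+  <!-- A minimal sketch of a compress/decompress round trip through one
+       codec instance (obtained as in the CodecPool sketch above); the
+       in-memory streams stand in for real file streams.
+
+       byte[] raw = "hello".getBytes();
+       java.io.ByteArrayOutputStream buf = new java.io.ByteArrayOutputStream();
+       CompressionOutputStream cout = codec.createOutputStream(buf);
+       cout.write(raw, 0, raw.length);
+       cout.finish();   // flush the compressed trailer without closing buf
+
+       CompressionInputStream cin = codec.createInputStream(
+           new java.io.ByteArrayInputStream(buf.toByteArray()));
+       int first = cin.read();   // first decompressed byte, 'h'
+  -->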
+ <!-- start class org.apache.hadoop.io.compress.CompressionCodecFactory -->
+ <class name="CompressionCodecFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionCodecFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Find the codecs specified in the config value io.compression.codecs
+   and register them. Defaults to gzip and the DEFLATE-based default codec.]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print the extension map out as a string.]]>
+ </doc>
+ </method>
+ <method name="getCodecClasses" return="java.util.List&lt;java.lang.Class&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the list of codecs listed in the configuration
+ @param conf the configuration to look in
+   @return a list of the codec classes, or null if the attribute
+   was not set]]>
+ </doc>
+ </method>
+ <method name="setCodecClasses"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="classes" type="java.util.List&lt;java.lang.Class&gt;"/>
+ <doc>
+ <![CDATA[Sets a list of codec classes in the configuration.
+ @param conf the configuration to modify
+ @param classes the list of classes to set]]>
+ </doc>
+ </method>
+ <method name="getCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Find the relevant compression codec for the given file based on its
+ filename suffix.
+ @param file the filename to check
+ @return the codec object]]>
+ </doc>
+ </method>
+ <method name="removeSuffix" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Removes a suffix from a filename, if present.
+ @param filename the filename to strip
+ @param suffix the suffix to remove
+ @return the shortened filename]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[A little test program.
+ @param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A factory that will find the correct codec for a given filename.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionCodecFactory -->
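+  <!-- A minimal sketch of extension-based codec lookup, assuming a
+       hypothetical input path; a null result means no registered
+       extension matched and the file should be read as-is.
+
+       Configuration conf = new Configuration();
+       CompressionCodecFactory factory = new CompressionCodecFactory(conf);
+       CompressionCodec codec =
+           factory.getCodec(new Path("logs/part-00000.gz"));
+       if (codec == null) {
+         // plain file: read it uncompressed
+       }
+  -->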
+ <!-- start class org.apache.hadoop.io.compress.CompressionInputStream -->
+ <class name="CompressionInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Create a compression input stream that reads
+   decompressed bytes from the given compressed stream.
+
+   @param in the input stream to read compressed bytes from]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read bytes from the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the decompressor to its initial state and discard any buffered data,
+ as the underlying stream may have been repositioned.]]>
+ </doc>
+ </method>
+ <field name="in" type="java.io.InputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The underlying input stream holding the compressed bytes.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression input stream.
+
+ <p>Implementations are assumed to be buffered. This permits clients to
+ reposition the underlying input stream then call {@link #resetState()},
+ without having to also synchronize client buffers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionInputStream -->
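+  <!-- A minimal sketch of the reposition-then-resetState contract described
+       above; 'fsIn' (a seekable stream) and 'blockStart' are hypothetical.
+
+       CompressionInputStream cin = codec.createInputStream(fsIn);
+       // ... read some data ...
+       fsIn.seek(blockStart);   // reposition the underlying stream
+       cin.resetState();        // drop buffered bytes so reads restart cleanly
+  -->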
+ <!-- start class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <class name="CompressionOutputStream" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Create a compression output stream that writes
+   the compressed bytes to the given stream.
+   @param out the stream to write compressed bytes to]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finishes writing compressed data to the output stream
+ without closing the underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the compression to the initial state.
+ Does not reset the underlying stream.]]>
+ </doc>
+ </method>
+ <field name="out" type="java.io.OutputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The underlying output stream that receives the compressed bytes.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression output stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <!-- start interface org.apache.hadoop.io.compress.Compressor -->
+ <interface name="Compressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for compression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets preset dictionary for compression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of uncompressed bytes input so far.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of compressed bytes output so far.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[When called, indicates that compression should end
+ with the current contents of the input buffer.]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the compressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with compressed data. Returns actual number
+ of bytes of compressed data. A return value of 0 indicates that
+ needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the compressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of compressed data.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets compressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the compressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'compressor' which can be
+ plugged into a {@link CompressionOutputStream} to compress data.
+ This is modelled after {@link java.util.zip.Deflater}]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Compressor -->
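+  <!-- A minimal sketch of the Deflater-style driving loop this interface
+       implies, assuming hypothetical 'data' (byte[]) and 'sink'
+       (OutputStream) variables.
+
+       byte[] buf = new byte[64 * 1024];
+       compressor.setInput(data, 0, data.length);
+       compressor.finish();                 // no more input will arrive
+       while (!compressor.finished()) {
+         int n = compressor.compress(buf, 0, buf.length);
+         if (n > 0) {
+           sink.write(buf, 0, n);
+         }
+       }
+  -->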
+ <!-- start interface org.apache.hadoop.io.compress.Decompressor -->
+ <interface name="Decompressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for decompression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+      <![CDATA[Sets a preset dictionary for decompression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns <code>true</code> if a preset dictionary is needed for decompression.
+ @return <code>true</code> if a preset dictionary is needed for decompression]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the compressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with uncompressed data. Returns actual number
+ of bytes of uncompressed data. A return value of 0 indicates that
+ #needsInput() should be called in order to determine if more input
+ data is required.
+
+   @param b Buffer for the uncompressed data
+   @param off Start offset of the data
+   @param len Size of the buffer
+   @return The actual number of bytes of uncompressed data.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets decompressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the decompressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'de-compressor' which can be
+   plugged into a {@link CompressionInputStream} to decompress data.
+ This is modelled after {@link java.util.zip.Inflater}]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Decompressor -->
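+  <!-- A minimal sketch mirroring the compressor loop for the inflate side,
+       assuming hypothetical 'source' (InputStream), 'chunk' (byte[]) and
+       'out' (OutputStream) variables.
+
+       byte[] buf = new byte[64 * 1024];
+       while (!decompressor.finished()) {
+         if (decompressor.needsInput()) {
+           int n = source.read(chunk);
+           if (n < 0) break;                // truncated input
+           decompressor.setInput(chunk, 0, n);
+         }
+         int m = decompressor.decompress(buf, 0, buf.length);
+         if (m > 0) {
+           out.write(buf, 0, m);
+         }
+       }
+  -->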
+ <!-- start class org.apache.hadoop.io.compress.DefaultCodec -->
+ <class name="DefaultCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="DefaultCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.DefaultCodec -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec -->
+ <class name="GzipCodec" extends="org.apache.hadoop.io.compress.DefaultCodec"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class creates gzip compressors/decompressors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <class name="GzipCodec.GzipInputStream" extends="org.apache.hadoop.io.compress.DecompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipInputStream" type="org.apache.hadoop.io.compress.DecompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow subclasses to directly set the inflater stream.]]>
+ </doc>
+ </constructor>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <class name="GzipCodec.GzipOutputStream" extends="org.apache.hadoop.io.compress.CompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipOutputStream" type="org.apache.hadoop.io.compress.CompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Allow subclasses to supply a different stream type here.
+ @param out the Deflater stream to use]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A bridge that wraps around a DeflaterOutputStream to make it
+ a CompressionOutputStream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <!-- start class org.apache.hadoop.io.compress.LzoCodec -->
+ <class name="LzoCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="LzoCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if native-lzo library is loaded & initialized.
+
+ @param conf configuration
+ @return <code>true</code> if native-lzo library is loaded & initialized;
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link org.apache.hadoop.io.compress.CompressionCodec} for a streaming
+ <b>lzo</b> compression/decompression pair.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzoCodec -->
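+ <!-- Usage sketch for the class above: a minimal compress/decompress round trip
+      through LzoCodec, using only the methods documented here. The Configuration,
+      the file name, and the presence of the native lzo library are assumptions.
+
+      import java.io.FileInputStream;
+      import java.io.FileOutputStream;
+      import java.io.IOException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.compress.CompressionInputStream;
+      import org.apache.hadoop.io.compress.CompressionOutputStream;
+      import org.apache.hadoop.io.compress.LzoCodec;
+
+      class LzoRoundTrip {
+        static void roundTrip(Configuration conf) throws IOException {
+          if (!LzoCodec.isNativeLzoLoaded(conf)) {
+            return;                                // native lzo not available
+          }
+          LzoCodec codec = new LzoCodec();
+          codec.setConf(conf);                     // Configurable: set conf before use
+          String file = "data" + codec.getDefaultExtension();  // extension includes the '.'
+          CompressionOutputStream out = codec.createOutputStream(new FileOutputStream(file));
+          out.write("hello, lzo".getBytes());
+          out.close();
+          CompressionInputStream in = codec.createInputStream(new FileInputStream(file));
+          byte[] buf = new byte[16];
+          int n = in.read(buf);                    // reads back the uncompressed bytes
+          in.close();
+        }
+      }
+ -->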
+</package>
+<package name="org.apache.hadoop.io.compress.lzo">
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
+ <class name="LzoCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="LzoCompressor" type="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified {@link CompressionStrategy}.
+
+ @param strategy lzo compression algorithm to use
+ @param directBufferSize size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default lzo1x_1 compression.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo compressors are loaded and initialized.
+
+ @return <code>true</code> if lzo compressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of bytes given to this compressor since last reset.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of bytes consumed by callers of compress since last reset.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Noop.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
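+ <!-- Usage sketch for the class above: the generic Compressor drive loop, shown
+      with the default lzo1x_1 LzoCompressor. Availability of the native lzo
+      library (see isNativeLzoLoaded) is an assumption.
+
+      import java.io.ByteArrayOutputStream;
+      import java.io.IOException;
+      import org.apache.hadoop.io.compress.lzo.LzoCompressor;
+
+      class LzoDriveLoop {
+        static byte[] compress(byte[] input) throws IOException {
+          LzoCompressor comp = new LzoCompressor();   // default lzo1x_1 compression
+          comp.setInput(input, 0, input.length);
+          comp.finish();                              // no more input will follow
+          ByteArrayOutputStream out = new ByteArrayOutputStream();
+          byte[] buf = new byte[64 * 1024];
+          while (!comp.finished()) {
+            int n = comp.compress(buf, 0, buf.length);
+            out.write(buf, 0, n);
+          }
+          long consumed = comp.getBytesRead();        // bytes given to the compressor
+          comp.end();                                 // documented above as a no-op
+          return out.toByteArray();
+        }
+      }
+ -->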
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <class name="LzoCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression algorithm for lzo library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
+ <class name="LzoDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="LzoDecompressor" type="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.
+
+ @param strategy lzo decompression algorithm
+ @param directBufferSize size of the direct-buffer]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo decompressors are loaded and initialized.
+
+ @return <code>true</code> if lzo decompressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+ <class name="LzoDecompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+</package>
+<package name="org.apache.hadoop.io.compress.zlib">
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <class name="BuiltInZlibDeflater" extends="java.util.zip.Deflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="BuiltInZlibDeflater" type="int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Deflater to make it conform
+ to the org.apache.hadoop.io.compress.Compressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <class name="BuiltInZlibInflater" extends="java.util.zip.Inflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="BuiltInZlibInflater" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibInflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Inflater to make it conform
+ to the org.apache.hadoop.io.compress.Decompressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
+ <class name="ZlibCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="ZlibCompressor" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified compression level.
+ Compressed data will be generated in ZLIB format.
+
+ @param level Compression level {@link CompressionLevel}
+ @param strategy Compression strategy {@link CompressionStrategy}
+ @param header Compression header {@link CompressionHeader}
+ @param directBufferSize Size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default compression level.
+ Compressed data will be generated in ZLIB format.]]>
+ </doc>
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
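+ <!-- Construction sketch for the class above. The enum constant names are
+      assumptions (they mirror standard zlib options), since this file only
+      lists the enums' values()/valueOf() methods.
+
+      import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
+
+      ZlibCompressor comp = new ZlibCompressor(
+          ZlibCompressor.CompressionLevel.BEST_COMPRESSION,    // assumed constant
+          ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY, // assumed constant
+          ZlibCompressor.CompressionHeader.DEFAULT_HEADER,     // assumed constant
+          64 * 1024);                                          // direct buffer size
+      // Then drive it with the same setInput/finish/compress/finished loop
+      // shown for LzoCompressor above.
+ -->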
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <class name="ZlibCompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The type of header for compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <class name="ZlibCompressor.CompressionLevel" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression level for zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <class name="ZlibCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression strategy for zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <class name="ZlibDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="ZlibDecompressor" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new decompressor.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <class name="ZlibDecompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The headers to detect from compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
+ <class name="ZlibFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ZlibFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeZlibLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if native-zlib code is loaded & initialized correctly and
+ can be loaded for this job.
+
+ @param conf configuration
+ @return <code>true</code> if native-zlib is loaded & initialized
+ and can be loaded for this job, else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of factories to create the right
+ zlib/gzip compressor/decompressor instances.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
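+ <!-- Usage sketch for the factory above: pick the right zlib implementation for
+      a job. Whether the native library is present is environment-dependent; the
+      selection between native and built-in implementations follows from the
+      classes documented in this package.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.compress.Compressor;
+      import org.apache.hadoop.io.compress.Decompressor;
+      import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+
+      Configuration conf = new Configuration();
+      boolean nativeZlib = ZlibFactory.isNativeZlibLoaded(conf);
+      Compressor comp = ZlibFactory.getZlibCompressor(conf);
+      Decompressor decomp = ZlibFactory.getZlibDecompressor(conf);
+      Class<? extends Compressor> compType = ZlibFactory.getZlibCompressorType(conf);
+ -->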
+</package>
+<package name="org.apache.hadoop.io.retry">
+ <!-- start class org.apache.hadoop.io.retry.RetryPolicies -->
+ <class name="RetryPolicies" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryPolicies"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="retryUpToMaximumCountWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumTimeWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxTime" type="long"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying for a maximum time, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumCountWithProportionalSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by the number of tries so far.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="exponentialBackoffRetry" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by a random
+ number in the range [0, 2^(number of retries)).
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByRemoteException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ A retry policy for RemoteException.
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <field name="TRY_ONCE_THEN_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail by re-throwing the exception.
+ This corresponds to having no retry mechanism in place.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="TRY_ONCE_DONT_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail silently for <code>void</code> methods, or by
+ re-throwing the exception for non-<code>void</code> methods.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="RETRY_FOREVER" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Keep trying forever.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A collection of useful implementations of {@link RetryPolicy}.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryPolicies -->
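+ <!-- Usage sketch for the policies above: compose a per-exception policy. The
+      choice of ConnectException and the retry counts are illustrative.
+
+      import java.net.ConnectException;
+      import java.util.HashMap;
+      import java.util.Map;
+      import java.util.concurrent.TimeUnit;
+      import org.apache.hadoop.io.retry.RetryPolicies;
+      import org.apache.hadoop.io.retry.RetryPolicy;
+
+      Map<Class<? extends Exception>, RetryPolicy> byException =
+          new HashMap<Class<? extends Exception>, RetryPolicy>();
+      byException.put(ConnectException.class,
+          RetryPolicies.retryUpToMaximumCountWithFixedSleep(5, 2, TimeUnit.SECONDS));
+      // Everything else fails immediately; ConnectException is retried five times.
+      RetryPolicy policy =
+          RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL, byException);
+ -->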
+ <!-- start interface org.apache.hadoop.io.retry.RetryPolicy -->
+ <interface name="RetryPolicy" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="shouldRetry" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Exception"/>
+ <param name="retries" type="int"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[<p>
+ Determines whether the framework should retry a method,
+ given the exception that caused it to fail and the number
+ of retries that have been made for that operation so far.
+ </p>
+ @param e The exception that caused the method to fail.
+ @param retries The number of times the method has been retried.
+ @return <code>true</code> if the method should be retried,
+ <code>false</code> if the method should not be retried
+ but shouldn't fail with an exception (only for void methods).
+ @throws Exception The re-thrown exception <code>e</code> indicating
+ that the method failed and should not be retried further.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Specifies a policy for retrying method failures.
+ Implementations of this interface should be immutable.
+ </p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.retry.RetryPolicy -->
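+ <!-- A hypothetical implementation of the interface above: retry IOExceptions a
+      fixed number of times, rethrow everything else; stateless, since the doc
+      recommends implementations be immutable.
+
+      import java.io.IOException;
+      import org.apache.hadoop.io.retry.RetryPolicy;
+
+      class RetryThriceOnIOException implements RetryPolicy {
+        public boolean shouldRetry(Exception e, int retries) throws Exception {
+          if (e instanceof IOException && retries < 3) {
+            return true;               // ask the framework to try again
+          }
+          throw e;                     // fail: rethrow, per the contract above
+        }
+      }
+ -->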
+ <!-- start class org.apache.hadoop.io.retry.RetryProxy -->
+ <class name="RetryProxy" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryProxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using the same retry policy for each method in the interface.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param retryPolicy the policy for retrying method call failures
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="methodNameToPolicyMap" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using a set of retry policies specified by method name.
+ If no retry policy is defined for a method then a default of
+ {@link RetryPolicies#TRY_ONCE_THEN_FAIL} is used.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param methodNameToPolicyMap a map of method names to retry policies
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for creating retry proxies.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryProxy -->
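+ <!-- Usage sketch for the factory above. MyProtocol and MyProtocolImpl are
+      hypothetical stand-ins for a real interface/implementation pair.
+
+      import java.util.concurrent.TimeUnit;
+      import org.apache.hadoop.io.retry.RetryPolicies;
+      import org.apache.hadoop.io.retry.RetryProxy;
+
+      MyProtocol retrying = (MyProtocol) RetryProxy.create(
+          MyProtocol.class,
+          new MyProtocolImpl(),
+          RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 500, TimeUnit.MILLISECONDS));
+      retrying.doSomething();   // failures go through the policy before each retry
+ -->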
+</package>
+<package name="org.apache.hadoop.io.serializer">
+ <!-- start interface org.apache.hadoop.io.serializer.Deserializer -->
+ <interface name="Deserializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the deserializer for reading.</p>]]>
+ </doc>
+ </method>
+ <method name="deserialize" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ Deserialize the next object from the underlying input stream.
+ If the object <code>t</code> is non-null then this deserializer
+ <i>may</i> set its internal state to the next object read from the input
+ stream. Otherwise, if the object <code>t</code> is null a new
+ deserialized object will be created.
+ </p>
+ @return the deserialized object]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying input stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for deserializing objects of type <T> from an
+ {@link InputStream}.
+ </p>
+
+ <p>
+ Deserializers are stateful, but must not buffer the input since
+ other consumers may read from the input between calls to
+ {@link #deserialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Deserializer -->
+ <!-- start class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <class name="DeserializerComparator" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator&lt;T&gt;"/>
+ <constructor name="DeserializerComparator" type="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link Deserializer} to deserialize
+ the objects to be compared so that the standard {@link Comparator} can
+ be used to compare them.
+ </p>
+ <p>
+ One may optimize compare-intensive operations by using a custom
+ implementation of {@link RawComparator} that operates directly
+ on byte representations.
+ </p>
+ @param <T>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <class name="JavaSerialization" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;java.io.Serializable&gt;"/>
+ <constructor name="JavaSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ An experimental {@link Serialization} for Java {@link Serializable} classes.
+ </p>
+ @see JavaSerializationComparator]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <class name="JavaSerializationComparator" extends="org.apache.hadoop.io.serializer.DeserializerComparator&lt;T&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JavaSerializationComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o1" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ <param name="o2" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link JavaSerialization}
+ {@link Deserializer} to deserialize objects that are then compared via
+ their {@link Comparable} interfaces.
+ </p>
+ @param <T>
+ @see JavaSerialization]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <!-- start interface org.apache.hadoop.io.serializer.Serialization -->
+ <interface name="Serialization" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Allows clients to test whether this {@link Serialization}
+ supports the given class.]]>
+ </doc>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Serializer} for the given class.]]>
+ </doc>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Deserializer} for the given class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Encapsulates a {@link Serializer}/{@link Deserializer} pair.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serialization -->
+ <!-- start class org.apache.hadoop.io.serializer.SerializationFactory -->
+ <class name="SerializationFactory" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SerializationFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Serializations are found by reading the <code>io.serializations</code>
+ property from <code>conf</code>, which is a comma-delimited list of
+ classnames.
+ </p>]]>
+ </doc>
+ </constructor>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getSerialization" return="org.apache.hadoop.io.serializer.Serialization&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for {@link Serialization}s.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.SerializationFactory -->
+ <!-- start interface org.apache.hadoop.io.serializer.Serializer -->
+ <interface name="Serializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the serializer for writing.</p>]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Serialize <code>t</code> to the underlying output stream.</p>]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying output stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for serializing objects of type <T> to an
+ {@link OutputStream}.
+ </p>
+
+ <p>
+ Serializers are stateful, but must not buffer the output since
+ other producers may write to the output between calls to
+ {@link #serialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serializer -->
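+ <!-- Usage sketch tying the serializer interfaces above together via
+      SerializationFactory. Assumes the default io.serializations setting, under
+      which WritableSerialization (below) handles Writable types such as Text.
+
+      import java.io.ByteArrayInputStream;
+      import java.io.ByteArrayOutputStream;
+      import java.io.IOException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.io.serializer.Deserializer;
+      import org.apache.hadoop.io.serializer.SerializationFactory;
+      import org.apache.hadoop.io.serializer.Serializer;
+
+      class TextRoundTrip {
+        static Text roundTrip() throws IOException {
+          SerializationFactory factory = new SerializationFactory(new Configuration());
+          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+          Serializer<Text> ser = factory.getSerializer(Text.class);
+          ser.open(bytes);
+          ser.serialize(new Text("hello"));
+          ser.close();
+          Deserializer<Text> deser = factory.getDeserializer(Text.class);
+          deser.open(new ByteArrayInputStream(bytes.toByteArray()));
+          Text copy = deser.deserialize(null);   // null: create a new object
+          deser.close();
+          return copy;
+        }
+      }
+ -->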
+ <!-- start class org.apache.hadoop.io.serializer.WritableSerialization -->
+ <class name="WritableSerialization" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="WritableSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Serialization} for {@link Writable}s that delegates to
+ {@link Writable#write(java.io.DataOutput)} and
+ {@link Writable#readFields(java.io.DataInput)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.WritableSerialization -->
+</package>
+<package name="org.apache.hadoop.ipc">
+ <!-- start class org.apache.hadoop.ipc.Client -->
+ <class name="Client" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Client" type="java.lang.Class, org.apache.hadoop.conf.Configuration, javax.net.SocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client whose values are of the given {@link Writable}
+ class.]]>
+ </doc>
+ </constructor>
+ <constructor name="Client" type="java.lang.Class&lt;?&gt;, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client with the default SocketFactory
+ @param valueClass
+ @param conf]]>
+ </doc>
+ </constructor>
+ <method name="setPingInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="pingInterval" type="int"/>
+ <doc>
+ <![CDATA[Set the ping interval value in the configuration.
+
+ @param conf Configuration
+ @param pingInterval the ping interval]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all threads related to this client. No further calls may be made
+ using this client.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a call, passing <code>param</code>, to the IPC server running at
+ <code>address</code>, returning the value. Throws exceptions if there are
+ network problems or if the remote code threw an exception.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="params" type="org.apache.hadoop.io.Writable[]"/>
+ <param name="addresses" type="java.net.InetSocketAddress[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Makes a set of calls in parallel. Each parameter is sent to the
+ corresponding address. When all values are available, or have timed out
+ or errored, the collected results are returned in an array. The array
+ contains nulls for calls that timed out or errored.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A client for an IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Server]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Client -->
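+ <!-- Usage sketch for the class above: one synchronous IPC call, which may
+      throw IOException or InterruptedException. The server address, port, and
+      request builder are assumptions; ObjectWritable as the value class mirrors
+      how the RPC layer constructs its own Client, but any Writable value class
+      works.
+
+      import java.net.InetSocketAddress;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.ObjectWritable;
+      import org.apache.hadoop.io.Writable;
+      import org.apache.hadoop.ipc.Client;
+
+      Client client = new Client(ObjectWritable.class, new Configuration());
+      try {
+        Writable request = makeRequest();          // hypothetical request builder
+        Writable response =
+            client.call(request, new InetSocketAddress("server.example.com", 9000));
+      } finally {
+        client.stop();                             // no further calls after stop()
+      }
+ -->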
+ <!-- start class org.apache.hadoop.ipc.RemoteException -->
+ <class name="RemoteException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RemoteException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lookupTypes" type="java.lang.Class[]"/>
+ <doc>
+ <![CDATA[If this remote exception wraps an exception of one of the
+ lookupTypes, instantiate and return that exception; otherwise return this.
+ <p>
+ Unwraps any IOException.
+
+ @param lookupTypes the desired exception classes.
+ @return IOException, which is either the unwrapped lookup-type exception or this.]]>
+ </doc>
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Instantiate and return the exception wrapped up by this remote exception.
+
+ <p> This unwraps any <code>Throwable</code> that has a constructor taking
+ a <code>String</code> as a parameter.
+ Otherwise it returns this.
+
+ @return the unwrapped <code>Throwable</code>, or this]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RemoteException -->
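+ <!-- Usage sketch for the class above: recovering the server-side exception
+      type from a RemoteException. The remote call and the expectation of a
+      FileNotFoundException are illustrative.
+
+      import java.io.FileNotFoundException;
+      import java.io.IOException;
+      import org.apache.hadoop.ipc.RemoteException;
+
+      try {
+        doRemoteCall();                            // hypothetical IPC invocation
+      } catch (RemoteException re) {
+        // Returns a FileNotFoundException if that is what the server threw;
+        // otherwise returns the RemoteException itself (also an IOException).
+        IOException cause = re.unwrapRemoteException(FileNotFoundException.class);
+        throw cause;
+      }
+ -->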
+ <!-- start class org.apache.hadoop.ipc.RPC -->
+ <class name="RPC" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="waitForProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object with the default SocketFactory.
+
+ @param protocol
+ @param clientVersion
+ @param addr
+ @param conf
+ @return a proxy instance
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopProxy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="proxy" type="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <doc>
+ <![CDATA[Stop this proxy and release its invoker's resources.
+ @param proxy the proxy to be stopped]]>
+ </doc>
+ </method>
+ <method name="call" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="method" type="java.lang.reflect.Method"/>
+ <param name="params" type="java.lang.Object[][]"/>
+ <param name="addrs" type="java.net.InetSocketAddress[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Expert: Make multiple, parallel calls to a set of servers.]]>
+ </doc>
+ </method>
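+ <!-- Sketch of the expert parallel-call method above (EchoProtocol and its
+      echo(String) method are hypothetical; reflection and IO exception handling
+      is omitted). Per the Client.call contract earlier in this package, entries
+      for calls that timed out or errored come back as null:
+
+      import java.lang.reflect.Method;
+      import java.net.InetSocketAddress;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.ipc.RPC;
+
+      Method echo = EchoProtocol.class.getMethod("echo", String.class);
+      Object[][] params = { { "ping a" }, { "ping b" } };  // one parameter row per server
+      InetSocketAddress[] addrs = {
+          new InetSocketAddress("host-a", 9000),
+          new InetSocketAddress("host-b", 9000) };
+      Object[] values = RPC.call(echo, params, addrs, new Configuration());
+ -->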
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="numHandlers" type="int"/>
+ <param name="verbose" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple RPC mechanism.
+
+ A <i>protocol</i> is a Java interface. All parameters and return types must
+ be one of:
+
+ <ul> <li>a primitive type, <code>boolean</code>, <code>byte</code>,
+ <code>char</code>, <code>short</code>, <code>int</code>, <code>long</code>,
+ <code>float</code>, <code>double</code>, or <code>void</code>; or</li>
+
+ <li>a {@link String}; or</li>
+
+ <li>a {@link Writable}; or</li>
+
+ <li>an array of the above types</li> </ul>
+
+ All methods in the protocol should throw only IOException. No field data of
+ the protocol instance is transmitted.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC -->
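+ <!-- End-to-end sketch of the RPC mechanism documented above. EchoProtocol is
+      the hypothetical protocol interface sketched after VersionedProtocol below,
+      EchoImpl its server-side implementation; host and port values are
+      placeholders and IOException handling is omitted:
+
+      import java.net.InetSocketAddress;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.ipc.RPC;
+
+      Configuration conf = new Configuration();
+
+      // Server side: expose an implementation instance and start the service.
+      RPC.Server server = RPC.getServer(new EchoImpl(), "0.0.0.0", 9000, conf);
+      server.start();
+
+      // Client side: obtain a proxy, call through it, then release it.
+      EchoProtocol proxy = (EchoProtocol) RPC.getProxy(
+          EchoProtocol.class, EchoProtocol.versionID,
+          new InetSocketAddress("server-host", 9000), conf);
+      String reply = proxy.echo("hello");
+      RPC.stopProxy(proxy);
+ -->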
+ <!-- start class org.apache.hadoop.ipc.RPC.Server -->
+ <class name="RPC.Server" extends="org.apache.hadoop.ipc.Server"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on]]>
+ </doc>
+ </constructor>
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on
+ @param numHandlers the number of method handler threads to run
+ @param verbose whether each call should be logged]]>
+ </doc>
+ </constructor>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receivedTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An RPC Server.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.Server -->
+ <!-- start class org.apache.hadoop.ipc.RPC.VersionMismatch -->
+ <class name="RPC.VersionMismatch" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.VersionMismatch" type="java.lang.String, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a version mismatch exception
+ @param interfaceName the name of the mismatched protocol
+ @param clientVersion the client's version of the protocol
+ @param serverVersion the server's version of the protocol]]>
+ </doc>
+ </constructor>
+ <method name="getInterfaceName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the interface name
+ @return the java class name
+ (e.g. org.apache.hadoop.mapred.InterTrackerProtocol)]]>
+ </doc>
+ </method>
+ <method name="getClientVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the client's preferred version]]>
+ </doc>
+ </method>
+ <method name="getServerVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the version the server agreed to speak.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A version mismatch for the RPC protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.VersionMismatch -->
+ <!-- start class org.apache.hadoop.ipc.Server -->
+ <class name="Server" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class, int, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class&lt;?&gt;, int, org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a server listening on the named port and address. Parameters passed must
+ be of the named class. The <code>handlerCount</code> determines
+ the number of handler threads that will be used to process calls.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.ipc.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the server instance the current call is executing under, or
+ null. May be called under {@link #call(Writable, long)} implementations, and
+ under {@link Writable} methods of parameters and return values. Permits
+ applications to access
+ the server context.]]>
+ </doc>
+ </method>
+ <method name="getRemoteIp" return="java.net.InetAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the IP address of the remote side when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="getRemoteAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns remote address as a string when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
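+ <!-- Sketch: accessing the server context from inside a call, using the static
+      accessors above (the surrounding call(Writable, long) override is
+      abbreviated):
+
+      import java.net.InetAddress;
+      import org.apache.hadoop.ipc.Server;
+
+      Server self = Server.get();                    // server this call runs under
+      InetAddress clientIp = Server.getRemoteIp();   // null outside an RPC or on error
+      String client = Server.getRemoteAddress();
+ -->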
+ <method name="bind"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.ServerSocket"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <param name="backlog" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A convenience method to bind to a given address and report
+ better exceptions if the address is not a valid host.
+ @param socket the socket to bind
+ @param address the address to bind to
+ @param backlog the number of connections allowed in the queue
+ @throws BindException if the address can't be bound
+ @throws UnknownHostException if the address isn't a valid host name
+ @throws IOException other random errors from bind]]>
+ </doc>
+ </method>
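+ <!-- Sketch of the bind convenience method (address and backlog values are
+      arbitrary; IOException handling is omitted):
+
+      import java.net.InetSocketAddress;
+      import java.net.ServerSocket;
+      import org.apache.hadoop.ipc.Server;
+
+      ServerSocket socket = new ServerSocket();
+      // Reports clearer BindException/UnknownHostException messages than a raw bind.
+      Server.bind(socket, new InetSocketAddress("0.0.0.0", 8020), 128);
+ -->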
+ <method name="setTimeout"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[No longer used.]]>
+ </doc>
+ </method>
+ <method name="setSocketSendBufSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Sets the socket buffer size used for responding to RPCs.]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts the service. Must be called before any calls will be handled.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops the service. No new calls will be handled after this is called.]]>
+ </doc>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Wait for the server to be stopped.
+ Does not wait for all subthreads to finish.
+ See {@link #stop()}.]]>
+ </doc>
+ </method>
+ <method name="getListenerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the socket (ip+port) on which the RPC server is listening.
+ @return the socket (ip+port) on which the RPC server is listening.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receiveTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called for each call.]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of open RPC connections
+ @return the number of open rpc connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <field name="HEADER" type="java.nio.ByteBuffer"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The first four bytes of Hadoop RPC connections]]>
+ </doc>
+ </field>
+ <field name="CURRENT_VERSION" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rpcMetrics" type="org.apache.hadoop.ipc.metrics.RpcMetrics"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Client]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Server -->
+ <!-- start interface org.apache.hadoop.ipc.VersionedProtocol -->
+ <interface name="VersionedProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the protocol version corresponding to the protocol interface.
+ @param protocol The classname of the protocol interface
+ @param clientVersion The version of the protocol that the client speaks
+ @return the version that the server will speak]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base interface for all protocols that use Hadoop RPC.
+ Interfaces extending this one are also supposed to have
+ a static final long versionID field.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.VersionedProtocol -->
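+ <!-- Minimal sketch of a protocol honoring the versionID convention above
+      (EchoProtocol and EchoImpl are hypothetical names):
+
+      import java.io.IOException;
+      import org.apache.hadoop.ipc.VersionedProtocol;
+
+      public interface EchoProtocol extends VersionedProtocol {
+          long versionID = 1L;   // the conventional static final version field
+          String echo(String message) throws IOException;
+      }
+
+      // A server-side implementation answers version probes with the version
+      // it will actually speak:
+      class EchoImpl implements EchoProtocol {
+          public long getProtocolVersion(String protocol, long clientVersion) {
+              return versionID;
+          }
+          public String echo(String message) { return message; }
+      }
+ -->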
+</package>
+<package name="org.apache.hadoop.ipc.metrics">
+ <!-- start class org.apache.hadoop.ipc.metrics.RpcMetrics -->
+ <class name="RpcMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="RpcMetrics" type="java.lang.String, java.lang.String, org.apache.hadoop.ipc.Server"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Push the metrics to the monitoring subsystem on each doUpdates() call.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="rpcQueueTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The metrics variables are public:
+ - they can be set directly by calling their set/inc methods;
+ - they can also be read directly, e.g. JMX does this.]]>
+ </doc>
+ </field>
+ <field name="rpcProcessingTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="metricsList" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.metrics.util.MetricsTimeVaryingRate&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various RPC statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #rpcQueueTime}.inc(time)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.metrics.RpcMetrics -->
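+ <!-- Sketch of updating the public metrics variables as described above. The
+      constructor's string arguments and the measured millisecond values are
+      placeholders; server is an existing org.apache.hadoop.ipc.Server:
+
+      import org.apache.hadoop.ipc.metrics.RpcMetrics;
+
+      RpcMetrics metrics = new RpcMetrics("rpc", "9000", server);
+      metrics.rpcQueueTime.inc(queueTimeMillis);           // per the class javadoc
+      metrics.rpcProcessingTime.inc(processingTimeMillis);
+ -->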
+ <!-- start interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+ <interface name="RpcMgtMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRpcOpsNumber" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of RPC Operations in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for RPC Operations in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Average RPC Operation Queued Time in the last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all min/max times.]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of open RPC connections
+ @return the number of open rpc connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for the RPC layer.
+ Many of the statistics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the statistics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most do.
+ The default Null metrics context however does NOT. So if you aren't
+ using any other metrics context then you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ rpc.period=10
+ </pre>
+ <p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+</package>
+<package name="org.apache.hadoop.log">
+ <!-- start class org.apache.hadoop.log.LogLevel -->
+ <class name="LogLevel" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[A command line implementation]]>
+ </doc>
+ </method>
+ <field name="USAGES" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Change log level at runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel -->
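+ <!-- Command-line sketch for LogLevel. The -getlevel/-setlevel argument forms
+      below are recalled from the USAGES string rather than shown in this file,
+      so treat them as an assumption; host, port, and logger name are placeholders:
+
+      // Query the effective level of a logger on a running daemon:
+      //   java org.apache.hadoop.log.LogLevel -getlevel host:port org.apache.hadoop.mapred.JobTracker
+      // Change it at runtime:
+      LogLevel.main(new String[] {
+          "-setlevel", "host:50030", "org.apache.hadoop.mapred.JobTracker", "DEBUG" });
+ -->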
+ <!-- start class org.apache.hadoop.log.LogLevel.Servlet -->
+ <class name="LogLevel.Servlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel.Servlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A servlet implementation]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel.Servlet -->
+</package>
+<package name="org.apache.hadoop.mapred">
+ <!-- start class org.apache.hadoop.mapred.ClusterStatus -->
+ <class name="ClusterStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of task trackers in the cluster.
+
+ @return the number of task trackers in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running map tasks in the cluster.
+
+ @return the number of currently running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running reduce tasks in the cluster.
+
+ @return the number of currently running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running map tasks in the cluster.
+
+ @return the maximum capacity for running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running reduce tasks in the cluster.
+
+ @return the maximum capacity for running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getJobTrackerState" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current state of the <code>JobTracker</code>,
+ as {@link JobTracker.State}
+
+ @return the current state of the <code>JobTracker</code>.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Status information on the current state of the Map-Reduce cluster.
+
+ <p><code>ClusterStatus</code> provides clients with information such as:
+ <ol>
+ <li>
+ Size of the cluster.
+ </li>
+ <li>
+ Task capacity of the cluster.
+ </li>
+ <li>
+ The number of currently running map & reduce tasks.
+ </li>
+ <li>
+ State of the <code>JobTracker</code>.
+ </li>
+ </ol></p>
+
+ <p>Clients can query for the latest <code>ClusterStatus</code>, via
+ {@link JobClient#getClusterStatus()}.</p>
+
+ @see JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ClusterStatus -->
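+ <!-- Sketch: querying the latest ClusterStatus via JobClient, as the class
+      javadoc above suggests (the JobClient(JobConf) constructor is an
+      assumption; IOException handling is omitted):
+
+      import org.apache.hadoop.mapred.ClusterStatus;
+      import org.apache.hadoop.mapred.JobClient;
+      import org.apache.hadoop.mapred.JobConf;
+
+      JobClient client = new JobClient(new JobConf());
+      ClusterStatus status = client.getClusterStatus();
+      System.out.println("trackers: " + status.getTaskTrackers()
+          + ", maps: " + status.getMapTasks() + "/" + status.getMaxMapTasks()
+          + ", reduces: " + status.getReduceTasks() + "/" + status.getMaxReduceTasks());
+ -->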
+ <!-- start class org.apache.hadoop.mapred.Counters -->
+ <class name="Counters" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Group&gt;"/>
+ <constructor name="Counters"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getGroupNames" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all counter classes.
+ @return Set of counter names.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Group&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getGroup" return="org.apache.hadoop.mapred.Counters.Group"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="groupName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the named counter group, or an empty group if there is none
+ with the specified name.]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Find the counter for the given enum. The same enum will always return the
+ same counter.
+ @param key the counter key
+ @return the matching counter object]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="group" type="java.lang.String"/>
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Find a counter using strings rather than an enum.
+ @param group the name of the group
+ @param id the id of the counter within the group (0 to N-1)
+ @param name the internal name of the counter
+ @return the counter for that name
+ @deprecated]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param key identifies a counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param group the name of the group
+ @param counter the internal name of the counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Returns current value of the specified counter, or 0 if the counter
+ does not exist.]]>
+ </doc>
+ </method>
+ <method name="incrAllCounters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+ </doc>
+ </method>
+ <method name="sum" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.mapred.Counters"/>
+ <param name="b" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Convenience method for computing the sum of two sets of counters.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of counters, by summing the number of counters
+ in each group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the set of groups.
+ The external format is:
+ #groups (groupName group)*
+
+ i.e. the number of groups followed by 0 or more groups, where each
+ group is of the form:
+
+ groupDisplayName #counters (false | true counter)*
+
+ where each counter is of the form:
+
+ name (false | true displayName) value]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a set of groups.]]>
+ </doc>
+ </method>
+ <method name="log"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Logs the current counter values.
+ @param log The log to use.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return textual representation of the counter values.]]>
+ </doc>
+ </method>
+ <method name="makeCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert a counters object into a single line that is easy to parse.
+ @return the string with "name=value" for each counter, separated by ","]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A set of named counters.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> can be of
+ any {@link Enum} type.</p>
+
+ <p><code>Counters</code> are bunched into {@link Group}s, each comprising
+ counters from a particular <code>Enum</code> class.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters -->
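+ <!-- Sketch of defining and updating enum-based counters, per the Counters
+      javadoc above (the WordCounters enum and group/counter names are
+      hypothetical):
+
+      import org.apache.hadoop.mapred.Counters;
+
+      enum WordCounters { GOOD_RECORDS, BAD_RECORDS }
+
+      Counters counters = new Counters();
+      counters.incrCounter(WordCounters.GOOD_RECORDS, 1);    // created on first use
+      counters.incrCounter("Custom", "parse-errors", 3);     // string-named counter
+      long good = counters.getCounter(WordCounters.GOOD_RECORDS);   // 1
+ -->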
+ <!-- start class org.apache.hadoop.mapred.Counters.Counter -->
+ <class name="Counters.Counter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the internal name of the counter.
+ @return the internal name of the counter]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the display name of the counter.
+ @return the user facing name of the counter]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current value of this counter.
+ @return the current value]]>
+ </doc>
+ </method>
+ <method name="increment"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Increment this counter by the given value
+ @param incr the value to increase this counter by]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A counter record, comprising its name and value.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Counter -->
+ <!-- start class org.apache.hadoop.mapred.Counters.Group -->
+ <class name="Counters.Group" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"/>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the raw name of the group. This is the name of the enum class
+ for this group of counters.]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the localized name of the group. This is the same as getName() by
+ default, but different if an appropriate ResourceBundle is found.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="counterName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the specified counter, or 0 if the counter does
+ not exist.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getCounter(String)} instead">
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given id and create it if it doesn't exist.
+ @param id the numeric id of the counter within the group
+ @param name the internal counter name
+ @return the counter
+ @deprecated use {@link #getCounter(String)} instead]]>
+ </doc>
+ </method>
+ <method name="getCounterForName" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given name and create it if it doesn't exist.
+ @param name the internal counter name
+ @return the counter]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of counters in this group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A <code>Group</code> of counters, comprising counters from a particular
+ counter {@link Enum} class.
+
+ <p><code>Group</code> handles localization of the class name and the
+ counter names.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Group -->
+ <!-- start class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <class name="DefaultJobHistoryParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DefaultJobHistoryParser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseJobTasks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobHistoryFile" type="java.lang.String"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobHistory.JobInfo"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Populates a JobInfo object from the job's history log file.
+ @param jobHistoryFile history file for this job.
+ @param job a pre-created JobInfo object; should be non-null.
+ @param fs FileSystem where historyFile is present.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Default parser for job history files. It creates an object model from
+ the job history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <!-- start class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <class name="FileAlreadyExistsException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileAlreadyExistsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileAlreadyExistsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when the target file already exists for any operation and
+ is not configured to be overwritten.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <!-- start class org.apache.hadoop.mapred.FileInputFormat -->
+ <class name="FileInputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <constructor name="FileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setMinSplitSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="minSplitSize" type="long"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="filename" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Is the given filename splitable? Usually true, but if the file is
+ stream compressed, it will not be.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split-up
+ so that {@link Mapper}s process entire files.
+
+ @param fs the file system that the file is on
+ @param filename the file name to check
+ @return is this file splitable?]]>
+ </doc>
+ </method>
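+ <!-- Sketch: forcing whole-file processing by overriding isSplitable, as the
+      javadoc above suggests (extending TextInputFormat is an assumption; any
+      concrete FileInputFormat subclass works):
+
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.TextInputFormat;
+
+      public class WholeFileTextInputFormat extends TextInputFormat {
+          protected boolean isSplitable(FileSystem fs, Path file) {
+              return false;   // each file goes to exactly one Mapper
+          }
+      }
+ -->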
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setInputPathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="filter" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.PathFilter&gt;"/>
+ <doc>
+ <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+
+ @param filter the PathFilter class used for filtering the input paths.]]>
+ </doc>
+ </method>
+ <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the PathFilter instance set for the input paths, if any.
+
+ @return the PathFilter instance set for the job, or NULL if none has been set.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression.
+
+ @param job the job to list input paths for
+ @return array of FileStatus objects
+ @throws IOException if there are zero items.]]>
+ </doc>
+ </method>
+ <method name="listPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="Use {@link #listStatus(JobConf)} instead.">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression.
+
+ @param job the job to list input paths for
+ @return array of Path objects
+ @throws IOException if there are zero items.
+ @deprecated Use {@link #listStatus(JobConf)} instead.]]>
+ </doc>
+ </method>
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Splits files returned by {@link #listStatus(JobConf)} when
+ they're too big.]]>
+ </doc>
+ </method>
+ <method name="computeSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="goalSize" type="long"/>
+ <param name="minSize" type="long"/>
+ <param name="blockSize" type="long"/>
+ </method>
+ <method name="getBlockIndex" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+ <param name="offset" type="long"/>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the given comma separated paths as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be set as
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add the given comma separated paths to the list of inputs for
+ the map-reduce job.
+
+ @param conf The configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be added to
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job.
+ @param inputPaths the {@link Path}s of the input directories/files
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @param conf The configuration of the job
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class for file-based {@link InputFormat}.
+
+ <p><code>FileInputFormat</code> is the base class for all file-based
+ <code>InputFormat</code>s. This provides a generic implementation of
+ {@link #getSplits(JobConf, int)}.
+ Subclasses of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(FileSystem, Path)} method to ensure input-files are
+ not split-up and are processed as a whole by {@link Mapper}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileInputFormat -->
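+ <!-- Sketch of configuring job inputs with the static helpers above (paths are
+      placeholders):
+
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.FileInputFormat;
+      import org.apache.hadoop.mapred.JobConf;
+
+      JobConf job = new JobConf();
+      FileInputFormat.setInputPaths(job, new Path[] { new Path("/data/in") });
+      FileInputFormat.addInputPath(job, new Path("/data/extra"));
+      // equivalently, as a comma separated string:
+      FileInputFormat.setInputPaths(job, "/data/in,/data/extra");
+ -->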
+ <!-- start class org.apache.hadoop.mapred.FileOutputFormat -->
+ <class name="FileOutputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="FileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
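+ <!-- Sketch: enabling compressed job output with the helpers above (choosing
+      GzipCodec is an assumption; any CompressionCodec implementation works):
+
+      import org.apache.hadoop.io.compress.GzipCodec;
+      import org.apache.hadoop.mapred.FileOutputFormat;
+      import org.apache.hadoop.mapred.JobConf;
+
+      JobConf job = new JobConf();
+      FileOutputFormat.setCompressOutput(job, true);
+      FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
+ -->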
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param conf The configuration of the job.
+ @param outputDir the {@link Path} of the output directory for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(JobConf)]]>
+ </doc>
+ </method>
+ <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the task's temporary output directory
+ for the map-reduce job.
+
+ <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
+
+ <p>Some applications need to create or write to side-files, which differ
+ from the actual job-outputs.
+
+ <p>In such cases there could be issues with two instances of the same TIP
+ (running simultaneously, e.g. speculative tasks) trying to open or write to
+ the same file (path) on HDFS. Hence the application-writer will have to pick
+ unique names per task-attempt (e.g. using the attempt id, say
+ <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
+
+ <p>To get around this the Map-Reduce framework helps the application-writer
+ out by maintaining a special
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>
+ sub-directory for each task-attempt on HDFS where the output of the
+ task-attempt goes. On successful completion of the task-attempt the files
+ in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only)
+ are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the
+ framework discards the sub-directory of unsuccessful task-attempts. This
+ is completely transparent to the application.</p>
+
+ <p>The application-writer can take advantage of this by creating any
+ side-files required in <tt>${mapred.work.output.dir}</tt> during execution
+ of a task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the
+ framework will move them out similarly, so there is no need to pick
+ unique paths per task-attempt.</p>
+
+ <p><i>Note</i>: the value of <tt>${mapred.work.output.dir}</tt> during
+ execution of a particular task-attempt is actually
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>, and this value is
+ set by the map-reduce framework. So, just create any side-files in the
+ path returned by {@link #getWorkOutputPath(JobConf)} from a map or reduce
+ task to take advantage of this feature.</p>
+
+ <p>The entire discussion holds true for maps of jobs with
+ reducer=NONE (i.e. 0 reduces), since the output of the map, in that case,
+ goes directly to HDFS.</p>
+
+ @return the {@link Path} to the task's temporary output directory
+ for the map-reduce job.]]>
+ </doc>
+ </method>
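+ <!-- A sketch of the side-file pattern described above: a task creates an
+      extra file under the work output path and, on success, the framework
+      promotes it together with the regular outputs. The helper and file
+      names are illustrative.
+
+ import java.io.IOException;
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.mapred.FileOutputFormat;
+ import org.apache.hadoop.mapred.JobConf;
+
+ public class SideFileExample {
+   // Called from within a running map or reduce task.
+   static void writeSideFile(JobConf job) throws IOException {
+     // Resolves to ${mapred.output.dir}/_temporary/_${taskid} at runtime.
+     Path workDir = FileOutputFormat.getWorkOutputPath(job);
+     Path side = new Path(workDir, "side-data.txt"); // illustrative name
+     FSDataOutputStream out = side.getFileSystem(job).create(side);
+     out.writeBytes("extra output\n");
+     out.close();
+   }
+ }
+ -->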
+ <method name="getTaskOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to create the task's temporary output directory and
+ return the path to the task's output file.
+
+ @param conf job-configuration
+ @param name temporary task-output filename
+ @return path to the task's temporary output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base class for {@link OutputFormat}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.FileSplit -->
+ <class name="FileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[Constructs a split.
+ @deprecated
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process]]>
+ </doc>
+ </constructor>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+ </doc>
+ </constructor>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file containing this split's data.]]>
+ </doc>
+ </method>
+ <method name="getStart" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The position of the first byte in the file to process.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the file to process.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A section of an input file. Returned by {@link
+ InputFormat#getSplits(JobConf, int)} and passed to
+ {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileSplit -->
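+ <!-- For illustration, constructing a split by hand with host information;
+      normally FileInputFormat.getSplits does this. The path and host names
+      are made up.
+
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.mapred.FileSplit;
+
+ public class FileSplitExample {
+   public static void main(String[] args) throws Exception {
+     // A 64 MB split starting at byte 0, with two replica hosts.
+     FileSplit split = new FileSplit(
+         new Path("hdfs://nn/data/part-00000"),
+         0L, 64L * 1024 * 1024,
+         new String[] { "host1", "host2" });
+     System.out.println(split + " on " + split.getLocations().length + " hosts");
+   }
+ }
+ -->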
+ <!-- start class org.apache.hadoop.mapred.ID -->
+ <class name="ID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.mapred.ID&gt;"/>
+ <constructor name="ID" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an ID object from the given int.]]>
+ </doc>
+ </constructor>
+ <constructor name="ID"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the int which represents the identifier.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare IDs by associated numbers]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.ID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.ID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct an ID object from the given string
+
+ @return the constructed ID object, or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A general identifier, which internally stores the id
+ as an integer. This is the super class of {@link JobID},
+ {@link TaskID} and {@link TaskAttemptID}.
+
+ @see JobID
+ @see TaskID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ID -->
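+ <!-- A sketch of a Writable round-trip using the methods listed above;
+      it relies on the public ID(int) constructor and the static read
+      helper documented here.
+
+ import java.io.ByteArrayInputStream;
+ import java.io.ByteArrayOutputStream;
+ import java.io.DataInputStream;
+ import java.io.DataOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.ID;
+
+ public class IdRoundTrip {
+   public static void main(String[] args) throws IOException {
+     ID original = new ID(42);
+     // Serialize with Writable.write(DataOutput) ...
+     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+     original.write(new DataOutputStream(bytes));
+     // ... and deserialize with the static read helper.
+     ID copy = ID.read(
+         new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
+     System.out.println(original.equals(copy));    // true
+     System.out.println(original.compareTo(copy)); // 0
+   }
+ }
+ -->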
+ <!-- start interface org.apache.hadoop.mapred.InputFormat -->
+ <interface name="InputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="getSplits is called in the client and can perform any
+ necessary validation of the input">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check for validity of the input-specification for the job.
+
+ <p>This method is used to validate the input directories when a job is
+ submitted so that the {@link JobClient} can fail early, with a useful
+ error message, in case of errors, e.g. if an input directory does not exist.
+ </p>
+
+ @param job job configuration.
+ @throws InvalidInputException if the job does not have valid input
+ @deprecated getSplits is called in the client and can perform any
+ necessary validation of the input]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically split the set of input files for the job.
+
+ <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
+ for processing.</p>
+
+ <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
+ input files are not physically split into chunks. For example, a split could
+ be an <i>&lt;input-file-path, start, offset&gt;</i> tuple.
+
+ @param job job configuration.
+ @param numSplits the desired number of splits, a hint.
+ @return an array of {@link InputSplit}s for the job.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordReader} for the given {@link InputSplit}.
+
+ <p>It is the responsibility of the <code>RecordReader</code> to respect
+ record boundaries while processing the logical split to present a
+ record-oriented view to the individual task.</p>
+
+ @param split the {@link InputSplit}
+ @param job the job that this split belongs to
+ @return a {@link RecordReader}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputFormat</code> describes the input-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the input-specification of the job.
+ </li>
+ <li>
+ Split up the input file(s) into logical {@link InputSplit}s, each of
+ which is then assigned to an individual {@link Mapper}.
+ </li>
+ <li>
+ Provide the {@link RecordReader} implementation to be used to glean
+ input records from the logical <code>InputSplit</code> for processing by
+ the {@link Mapper}.
+ </li>
+ </ol>
+
+ <p>The default behavior of file-based {@link InputFormat}s, typically
+ sub-classes of {@link FileInputFormat}, is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of the input files. However, the {@link FileSystem} blocksize of
+ the input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Clearly, logical splits based on input-size are insufficient for many
+ applications, since record boundaries must be respected. In such cases, the
+ application also has to implement a {@link RecordReader}, which has the
+ responsibility to respect record-boundaries and present a record-oriented
+ view of the logical <code>InputSplit</code> to the individual task.
+
+ @see InputSplit
+ @see RecordReader
+ @see JobClient
+ @see FileInputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputFormat -->
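+ <!-- A sketch of the flow implied above, driving an InputFormat by hand:
+      ask for splits, then pull records from a RecordReader per split.
+      Assumes the classic RecordReader methods (createKey, createValue,
+      next) and the no-op Reporter.NULL.
+
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.InputFormat;
+ import org.apache.hadoop.mapred.InputSplit;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.RecordReader;
+ import org.apache.hadoop.mapred.Reporter;
+
+ public class InputFormatWalk {
+   static <K, V> void walk(InputFormat<K, V> format, JobConf job)
+       throws IOException {
+     for (InputSplit split : format.getSplits(job, 4)) { // 4 is just a hint
+       RecordReader<K, V> reader =
+           format.getRecordReader(split, job, Reporter.NULL);
+       K key = reader.createKey();
+       V value = reader.createValue();
+       while (reader.next(key, value)) {
+         // hand (key, value) to a Mapper here
+       }
+       reader.close();
+     }
+   }
+ }
+ -->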
+ <!-- start interface org.apache.hadoop.mapred.InputSplit -->
+ <interface name="InputSplit" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the total number of bytes in the data of the <code>InputSplit</code>.
+
+ @return the number of bytes in the input split.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hostnames where the input split is located.
+
+ @return list of hostnames where data of the <code>InputSplit</code> is
+ located as an array of <code>String</code>s.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputSplit</code> represents the data to be processed by an
+ individual {@link Mapper}.
+
+ <p>Typically, it presents a byte-oriented view on the input and is the
+ responsibility of {@link RecordReader} of the job to process this and present
+ a record-oriented view.
+
+ @see InputFormat
+ @see RecordReader]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputSplit -->
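+ <!-- A toy InputSplit implementation under the contract above; the class
+      is illustrative and reports no locality hints.
+
+ import java.io.DataInput;
+ import java.io.DataOutput;
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.InputSplit;
+
+ public class RangeSplit implements InputSplit {
+   private long length;
+
+   public RangeSplit() { }                  // required for deserialization
+   public RangeSplit(long length) { this.length = length; }
+
+   public long getLength() { return length; }
+   public String[] getLocations() { return new String[0]; } // no preferred hosts
+
+   public void write(DataOutput out) throws IOException { out.writeLong(length); }
+   public void readFields(DataInput in) throws IOException { length = in.readLong(); }
+ }
+ -->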
+ <!-- start class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <class name="InvalidFileTypeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidFileTypeException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidFileTypeException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when a file's type differs from the expected type,
+ e.g. when a file is found where a directory is expected.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidInputException -->
+ <class name="InvalidInputException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidInputException" type="java.util.List&lt;java.io.IOException&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create the exception with the given list.
+ @param probs the list of problems to report. This list is not copied.]]>
+ </doc>
+ </constructor>
+ <method name="getProblems" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete list of the problems reported.
+ @return the list of problems, which must not be modified]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get a summary message of the problems found.
+ @return the concatenated messages from all of the problems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class wraps a list of problems with the input, so that the user
+ can get a list of problems together instead of finding and fixing them one
+ by one.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidInputException -->
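+ <!-- A sketch of using getProblems() to report every input problem at
+      once; the wrapper class and method are illustrative.
+
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.InvalidInputException;
+ import org.apache.hadoop.mapred.JobClient;
+ import org.apache.hadoop.mapred.JobConf;
+
+ public class SubmitWithDiagnostics {
+   static void submit(JobConf job) throws IOException {
+     try {
+       JobClient.runJob(job);
+     } catch (InvalidInputException e) {
+       // Print all problems together instead of fixing them one by one.
+       for (IOException problem : e.getProblems()) {
+         System.err.println("bad input: " + problem.getMessage());
+       }
+       throw e;
+     }
+   }
+ }
+ -->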
+ <!-- start class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <class name="InvalidJobConfException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidJobConfException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidJobConfException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when the jobconf is missing some mandatory
+ attributes, or the value of some attributes is invalid.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <!-- start class org.apache.hadoop.mapred.IsolationRunner -->
+ <class name="IsolationRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IsolationRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Run a single task
+ @param args the first argument is the task directory]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.IsolationRunner -->
+ <!-- start class org.apache.hadoop.mapred.JobClient -->
+ <class name="JobClient" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobClient"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job client.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client with the given {@link JobConf}, and connect to the
+ default {@link JobTracker}.
+
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client, connect to the indicated job tracker.
+
+ @param jobTrackAddr the job tracker to connect to.
+ @param conf configuration.]]>
+ </doc>
+ </constructor>
+ <method name="getCommandLineConfig" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the command line configuration.]]>
+ </doc>
+ </method>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Connect to the default {@link JobTracker}.
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the <code>JobClient</code>.]]>
+ </doc>
+ </method>
+ <method name="getFs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a filesystem handle. We need this to prepare jobs
+ for submission to the MapReduce system.
+
+ @return the filesystem handle.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobFile" type="java.lang.String"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param jobFile the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param job the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a {@link RunningJob} object to track an ongoing job. Returns
+ null if the id does not correspond to any known job.
+
+ @param jobid the jobid of the job.
+ @return the {@link RunningJob} handle to track the job, null if the
+ <code>jobid</code> doesn't correspond to any known job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getJob(JobID)}.">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getJob(JobID)}.]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the map tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the map tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getMapTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getMapTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the reduce tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the reduce tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getReduceTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getReduceTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the Map-Reduce cluster.
+
+ @return the status information about the Map-Reduce cluster as an object
+ of {@link ClusterStatus}.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are not completed and not failed.
+
+ @return array of {@link JobStatus} for the running/to-be-run jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are submitted.
+
+ @return array of {@link JobStatus} for the submitted jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Utility that submits a job, then polls for progress until the job is
+ complete.
+
+ @param job the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Sets the output filter for tasks. Only those tasks are printed whose
+ output matches the filter.
+ @param newValue task filter.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the task output filter out of the JobConf.
+
+ @param job the JobConf to examine.
+ @return the filter level.]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Modify the JobConf to set the task output filter.
+
+ @param job the JobConf to modify.
+ @param newValue the value to set.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the task output filter.
+ @return the task filter.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getDefaultMaps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Maps in the cluster.
+
+ @return the max available Maps in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDefaultReduces" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Reduces in the cluster.
+
+ @return the max available Reduces in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Grab the jobtracker system directory path where job-specific files are to be placed.
+
+ @return the system directory where job-specific files are to be placed.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[<code>JobClient</code> is the primary interface for the user-job to interact
+ with the {@link JobTracker}.
+
+ <code>JobClient</code> provides facilities to submit jobs, track their
+ progress, access component-tasks' reports and logs, get the Map-Reduce
+ cluster status information, etc.
+
+ <p>The job submission process involves:
+ <ol>
+ <li>
+ Checking the input and output specifications of the job.
+ </li>
+ <li>
+ Computing the {@link InputSplit}s for the job.
+ </li>
+ <li>
+ Setting up the requisite accounting information for the {@link DistributedCache}
+ of the job, if necessary.
+ </li>
+ <li>
+ Copying the job's jar and configuration to the map-reduce system directory
+ on the distributed file-system.
+ </li>
+ <li>
+ Submitting the job to the <code>JobTracker</code> and optionally monitoring
+ its status.
+ </li>
+ </ol></p>
+
+ Normally the user creates the application, describes various facets of the
+ job via {@link JobConf} and then uses the <code>JobClient</code> to submit
+ the job and monitor its progress.
+
+ <p>Here is an example on how to use <code>JobClient</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ job.setInputPath(new Path("in"));
+ job.setOutputPath(new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ </pre></blockquote></p>
+
+ <h4 id="JobControl">Job Control</h4>
+
+ <p>At times clients would chain map-reduce jobs to accomplish complex tasks
+ which cannot be done via a single map-reduce job. This is fairly easy, since
+ the output of the job typically goes to the distributed file-system, and that
+ can be used as the input for the next job.</p>
+
+ <p>However, this also means that the onus of ensuring jobs are complete
+ (success/failure) lies squarely on the clients. In such situations the
+ various job-control options are:
+ <ol>
+ <li>
+ {@link #runJob(JobConf)} : submits the job and returns only after
+ the job has completed.
+ </li>
+ <li>
+ {@link #submitJob(JobConf)} : only submits the job; the client then polls
+ the returned {@link RunningJob} handle to query status and make
+ scheduling decisions.
+ </li>
+ <li>
+ {@link JobConf#setJobEndNotificationURI(String)} : sets up a notification
+ on job-completion, thus avoiding polling.
+ </li>
+ </ol></p>
+
+ @see JobConf
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient -->
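+ <!-- A sketch of job-control option 2 above (submit, then poll); it assumes
+      the usual RunningJob status calls isComplete() and isSuccessful(),
+      and the class and method names are illustrative.
+
+ import org.apache.hadoop.mapred.JobClient;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.RunningJob;
+
+ public class PollingSubmit {
+   static boolean runAndPoll(JobConf job) throws Exception {
+     JobClient client = new JobClient(job);
+     RunningJob running = client.submitJob(job); // returns immediately
+     while (!running.isComplete()) {             // poll until done
+       Thread.sleep(5000);
+     }
+     client.close();
+     return running.isSuccessful();
+   }
+ }
+ -->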
+ <!-- start class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <class name="JobClient.TaskStatusFilter" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobClient.TaskStatusFilter&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <!-- start class org.apache.hadoop.mapred.JobConf -->
+ <class name="JobConf" extends="org.apache.hadoop.conf.Configuration"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <method name="getJar" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user jar for the map-reduce job.
+
+ @return the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJar"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jar" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user jar for the map-reduce job.
+
+ @param jar the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJarByClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the job's jar file by finding an example class location.
+
+ @param cls the example class.]]>
+ </doc>
+ </method>
+ <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link JobClient#getSystemDir()} instead.
+ Get the system directory where job-specific files are to be placed.">
+ <doc>
+ <![CDATA[@deprecated Use {@link JobClient#getSystemDir()} instead.
+ Get the system directory where job-specific files are to be placed.
+
+ @return the system directory where job-specific files are to be placed.]]>
+ </doc>
+ </method>
+ <method name="getLocalDirs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="subdir" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a local file name. Files are distributed among configured
+ local directories.]]>
+ </doc>
+ </method>
+ <method name="setInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or
+ {@link FileInputFormat#setInputPaths(JobConf, String)}">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the input directory for the map-reduce job.
+
+ @param dir the {@link Path} of the input directory for the map-reduce job.
+ @deprecated Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or
+ {@link FileInputFormat#setInputPaths(JobConf, String)}]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#addInputPath(JobConf, Path)} or
+ {@link FileInputFormat#addInputPaths(JobConf, String)}">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
+ @param dir {@link Path} to be added to the list of inputs for
+ the map-reduce job.
+ @deprecated Use {@link FileInputFormat#addInputPath(JobConf, Path)} or
+ {@link FileInputFormat#addInputPaths(JobConf, String)}]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#getInputPaths(JobConf)}">
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @return the list of input {@link Path}s for the map-reduce job.
+ @deprecated Use {@link FileInputFormat#getInputPaths(JobConf)}]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reported username for this job.
+
+ @return the username]]>
+ </doc>
+ </method>
+ <method name="setUser"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the reported username for this job.
+
+ @param user the username for this job.]]>
+ </doc>
+ </method>
+ <method name="setKeepFailedTaskFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the framework should keep the intermediate files for
+ failed tasks.
+
+ @param keep <code>true</code> if framework should keep the intermediate files
+ for failed tasks, <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="getKeepFailedTaskFiles" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should the temporary files for failed tasks be kept?
+
+ @return should the files be kept?]]>
+ </doc>
+ </method>
+ <method name="setKeepTaskFilesPattern"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pattern" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set a regular expression for task names that should be kept.
+ The regular expression ".*_m_000123_0" would keep the files
+ for the first instance of map 123 that ran.
+
+ @param pattern the java.util.regex.Pattern to match against the
+ task names.]]>
+ </doc>
+ </method>
+ <method name="getKeepTaskFilesPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the regular expression that is matched against the task names
+ to see if we need to keep the files.
+
+ @return the pattern as a string, if it was set, otherwise null.]]>
+ </doc>
+ </method>
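+ <!-- A sketch combining the keep-files knobs above, reusing the regex from
+      the documentation; the class name is illustrative.
+
+ import org.apache.hadoop.mapred.JobConf;
+
+ public class KeepFilesExample {
+   public static void main(String[] args) {
+     JobConf conf = new JobConf();
+     // Keep files for the first attempt of map 123 (regex from the docs above).
+     conf.setKeepTaskFilesPattern(".*_m_000123_0");
+     // Or keep the intermediate files of every failed task.
+     conf.setKeepFailedTaskFiles(true);
+   }
+ }
+ -->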
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the default file system.
+
+ @param dir the new current working directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the default file system.
+
+ @return the directory name.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat#getOutputPath(JobConf)} or
+ {@link FileOutputFormat#getWorkOutputPath(JobConf)}
+ Get the {@link Path} to the output directory for the map-reduce job.">
+ <doc>
+ <![CDATA[@deprecated Use {@link FileOutputFormat#getOutputPath(JobConf)} or
+ {@link FileOutputFormat#getWorkOutputPath(JobConf)}
+ Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat#setOutputPath(JobConf, Path)}
+ Set the {@link Path} of the output directory for the map-reduce job.
+
+ &lt;p>&lt;i>Note&lt;/i>:
+ &lt;/p>">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[@deprecated Use {@link FileOutputFormat#setOutputPath(JobConf, Path)}
+ Set the {@link Path} of the output directory for the map-reduce job.
+
+ <p><i>Note</i>:
+ </p>
+ @param dir the {@link Path} of the output directory for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputFormat" return="org.apache.hadoop.mapred.InputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link InputFormat} implementation for the map-reduce job,
+ defaults to {@link TextInputFormat} if not specified explicitly.
+
+ @return the {@link InputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link InputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link InputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="getOutputFormat" return="org.apache.hadoop.mapred.OutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link OutputFormat} implementation for the map-reduce job,
+ defaults to {@link TextOutputFormat} if not specified explicitly.
+
+ @return the {@link OutputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setOutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link OutputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link OutputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="setCompressMapOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Should the map outputs be compressed before transfer?
+ Uses the SequenceFile compression.
+
+ @param compress should the map outputs be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressMapOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Are the outputs of the maps compressed?
+
+ @return <code>true</code> if the outputs of the maps are to be compressed,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="{@link CompressionType} is no longer valid for intermediate
+ map-outputs.">
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the map outputs.
+
+ @param style the {@link CompressionType} to control how the map outputs
+ are compressed.
+ @deprecated {@link CompressionType} is no longer valid for intermediate
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="{@link CompressionType} is no longer valid for intermediate
+ map-outputs.">
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the map outputs.
+
+ @return the {@link CompressionType} for map outputs, defaulting to
+ {@link CompressionType#RECORD}.
+ @deprecated {@link CompressionType} is no longer valid for intermediate
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the given class as the {@link CompressionCodec} for the map outputs.
+
+ @param codecClass the {@link CompressionCodec} class that will compress
+ the map outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the map outputs.
+
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} class that should be used to compress the
+ map outputs.
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
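+ <!-- A sketch enabling intermediate (map-output) compression with the
+      setters above; GzipCodec is one possible codec and the class name
+      is illustrative.
+
+ import org.apache.hadoop.io.compress.GzipCodec;
+ import org.apache.hadoop.mapred.JobConf;
+
+ public class MapOutputCompression {
+   public static void main(String[] args) {
+     JobConf conf = new JobConf();
+     // Compress intermediate map outputs before the shuffle.
+     conf.setCompressMapOutput(true);
+     conf.setMapOutputCompressorClass(GzipCodec.class);
+   }
+ }
+ -->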
+ <method name="getMapOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the map output data. If it is not set, use the
+ (final) output key class. This allows the map output key class to be
+ different from the final output key class.
+
+ @return the map output key class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the map output data. This allows the user to
+ specify the map output key class to be different from the final output
+ key class.
+
+ @param theClass the map output key class.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for the map output data. If it is not set, use the
+ (final) output value class. This allows the map output value class to be
+ different than the final output value class.
+
+ @return the map output value class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for the map output data. This allows the user to
+ specify the map output value class to be different than the final output
+ value class.
+
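+ <p>For illustration, a sketch where the map phase emits
+ <code>Text</code>/<code>IntWritable</code> pairs while the final job output
+ uses a different value class:</p>
+ <p><blockquote><pre>
+ job.setMapOutputKeyClass(Text.class);
+ job.setMapOutputValueClass(IntWritable.class);
+ job.setOutputKeyClass(Text.class);
+ job.setOutputValueClass(LongWritable.class);
+ </pre></blockquote></p>
+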
+ @param theClass the map output value class.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the job output data.
+
+ @return the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the job output data.
+
+ @param theClass the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link RawComparator} comparator used to compare keys.
+
+ @return the {@link RawComparator} comparator used to compare keys.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyComparatorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link RawComparator} comparator used to compare keys.
+
+ @param theClass the {@link RawComparator} comparator used to
+ compare keys.
+ @see #setOutputValueGroupingComparator(Class)]]>
+ </doc>
+ </method>
+ <method name="getOutputValueGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user defined {@link WritableComparable} comparator for
+ grouping keys of inputs to the reduce.
+
+ @return comparator set by the user for grouping values.
+ @see #setOutputValueGroupingComparator(Class) for details.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueGroupingComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the user defined {@link RawComparator} comparator for
+ grouping keys in the input to the reduce.
+
+ <p>This comparator should be provided if the equivalence rules for keys
+ for sorting the intermediates are different from those for grouping keys
+ before each call to
+ {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
+
+ <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
+ in a single call to the reduce function if K1 and K2 compare as equal.</p>
+
+ <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
+ how keys are sorted, the two can be used in conjunction to simulate
+ <i>secondary sort on values</i>.</p>
+
+ <p><i>Note</i>: This is not a guarantee of the reduce sort being
+ <i>stable</i> in any sense. (In any case, with the order of available
+ map-outputs to the reduce being non-deterministic, it wouldn't make
+ that much sense.)</p>
+
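+ <p>A minimal sketch of wiring up a secondary sort; the comparator class
+ names are placeholders, not framework classes:</p>
+ <p><blockquote><pre>
+ // sort on the full composite key, group on its natural part only
+ job.setOutputKeyComparatorClass(CompositeKeyComparator.class);
+ job.setOutputValueGroupingComparator(NaturalKeyGroupingComparator.class);
+ </pre></blockquote></p>
+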
+ @param theClass the comparator class to be used for grouping keys.
+ It should implement <code>RawComparator</code>.
+ @see #setOutputKeyComparatorClass(Class)]]>
+ </doc>
+ </method>
+ <method name="getOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for job outputs.
+
+ @return the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for job outputs.
+
+ @param theClass the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapperClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Mapper} class for the job.
+
+ @return the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapperClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Mapper} class for the job.
+
+ @param theClass the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getMapRunnerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link MapRunnable} class for the job.
+
+ @return the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapRunnerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"/>
+ <doc>
+ <![CDATA[Expert: Set the {@link MapRunnable} class for the job.
+
+ Typically used to exert greater control on {@link Mapper}s.
+
+ @param theClass the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getPartitionerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Partitioner} used to partition {@link Mapper}-outputs
+ to be sent to the {@link Reducer}s.
+
+ @return the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setPartitionerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Partitioner} class used to partition
+ {@link Mapper}-outputs to be sent to the {@link Reducer}s.
+
+ @param theClass the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getReducerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Reducer} class for the job.
+
+ @return the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setReducerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Reducer} class for the job.
+
+ @param theClass the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getCombinerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers. Typically the combiner is the same as
+ the {@link Reducer} for the job, i.e. {@link #getReducerClass()}.
+
+ @return the user-defined combiner class used to combine map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCombinerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers.
+
+ <p>The combiner is a task-level aggregation operation which, in some cases,
+ helps to cut down the amount of data transferred from the {@link Mapper} to
+ the {@link Reducer}, leading to better performance.</p>
+
+ <p>Typically the combiner is the same as the <code>Reducer</code> for the
+ job i.e. {@link #setReducerClass(Class)}.</p>
+
+ @param theClass the user-defined combiner class used to combine
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCombineOnceOnly"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[If true, ensures the combiner is run once and only once on output from
+ the map. Otherwise, the combiner may be run zero or more times.]]>
+ </doc>
+ </method>
+ <method name="getCombineOnceOnly" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution is to be used for this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on, else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getMapSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for map tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution is to be
+ used for map tasks in this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for map tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for map tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getReduceSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for reduce tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution is to be used
+ for reduce tasks in this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setReduceSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for reduce tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for reduce tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getNumMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of map tasks for this job.
+ Defaults to <code>1</code>.
+
+ @return the number of map tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumMapTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the number of map tasks for this job.
+
+ <p><i>Note</i>: This is only a <i>hint</i> to the framework. The actual
+ number of spawned map tasks depends on the number of {@link InputSplit}s
+ generated by the job's {@link InputFormat#getSplits(JobConf, int)}.
+
+ A custom {@link InputFormat} is typically used to accurately control
+ the number of map tasks for the job.</p>
+
+ <h4 id="NoOfMaps">How many maps?</h4>
+
+ <p>The number of maps is usually driven by the total size of the inputs
+ i.e. total number of blocks of the input files.</p>
+
+ <p>The right level of parallelism for maps seems to be around 10-100 maps
+ per node, although it has been set up to 300 or so for very cpu-light map
+ tasks. Task setup takes a while, so it is best if the maps take at least a
+ minute to execute.</p>
+
+ <p>The default behavior of file-based {@link InputFormat}s is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of input files. However, the {@link FileSystem} blocksize of the
+ input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB,
+ you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is
+ used to set it even higher.</p>
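+
+ <p>For reference, the arithmetic behind that figure: 10 TB / 128 MB =
+ (10 * 1024 * 1024) MB / 128 MB = 81,920 splits, i.e. roughly
+ 82,000 maps.</p>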
+
+ @param n the number of map tasks for this job.
+ @see InputFormat#getSplits(JobConf, int)
+ @see FileInputFormat
+ @see FileSystem#getDefaultBlockSize()
+ @see FileStatus#getBlockSize()]]>
+ </doc>
+ </method>
+ <method name="getNumReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of reduce tasks for this job. Defaults to
+ <code>1</code>.
+
+ @return the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumReduceTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the requisite number of reduce tasks for this job.
+
+ <h4 id="NoOfReduces">How many reduces?</h4>
+
+ <p>The right number of reduces seems to be <code>0.95</code> or
+ <code>1.75</code> multiplied by (&lt;<i>no. of nodes</i>&gt; *
+ <a href="{@docRoot}/../hadoop-default.html#mapred.tasktracker.reduce.tasks.maximum">
+ mapred.tasktracker.reduce.tasks.maximum</a>).
+ </p>
+
+ <p>With <code>0.95</code> all of the reduces can launch immediately and
+ start transferring map outputs as the maps finish. With <code>1.75</code>
+ the faster nodes will finish their first round of reduces and launch a
+ second wave of reduces doing a much better job of load balancing.</p>
+
+ <p>Increasing the number of reduces increases the framework overhead, but
+ improves load balancing and lowers the cost of failures.</p>
+
+ <p>The scaling factors above are slightly less than whole numbers to
+ reserve a few reduce slots in the framework for speculative-tasks, failures
+ etc.</p>
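+
+ <p>A worked example with illustrative numbers: a cluster of 20 nodes, each
+ with <code>mapred.tasktracker.reduce.tasks.maximum</code> set to 2, gives
+ 0.95 * 20 * 2 = 38 reduces for a single wave, or 1.75 * 20 * 2 = 70 for
+ two waves:</p>
+ <p><blockquote><pre>
+ job.setNumReduceTasks(38);
+ </pre></blockquote></p>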
+
+ <h4 id="ReducerNone">Reducer NONE</h4>
+
+ <p>It is legal to set the number of reduce-tasks to <code>zero</code>.</p>
+
+ <p>In this case the output of the map-tasks goes directly to the distributed
+ file-system, to the path set by
+ {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the
+ framework doesn't sort the map-outputs before writing them out to HDFS.</p>
+
+ @param n the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ map task, as specified by the <code>mapred.map.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ map task.
+
+ @param n the number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ reduce task, as specified by the <code>mapred.reduce.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ reduce task.
+
+ @param n the number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name. This is only used to identify the
+ job to the user.
+
+ @return the job's name, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified job name.
+
+ @param name the job's new name.]]>
+ </doc>
+ </method>
+ <method name="getSessionId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified session identifier. The default is the empty string.
+
+ The session identifier is used to tag metric data that is reported to some
+ performance metrics system via the org.apache.hadoop.metrics API. The
+ session identifier is intended, in particular, for use by Hadoop-On-Demand
+ (HOD) which allocates a virtual Hadoop cluster dynamically and transiently.
+ HOD will set the session identifier by modifying the hadoop-site.xml file
+ before starting the cluster.
+
+ When not running under HOD, this identifier is expected to remain set to
+ the empty string.
+
+ @return the session identifier, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setSessionId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sessionId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified session identifier.
+
+ @param sessionId the new session id.]]>
+ </doc>
+ </method>
+ <method name="setMaxTaskFailuresPerTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="noFailures" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds <code>noFailures</code>, the
+ tasktracker is <i>blacklisted</i> for this job.
+
+ @param noFailures maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxTaskFailuresPerTracker" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Expert: Get the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds this, the tasktracker is
+ <i>blacklisted</i> for this job.
+
+ @return the maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of map tasks that can fail without
+ the job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed map-task results in
+ the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the maximum percentage of map tasks that can fail without the
+ job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts} attempts
+ before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of reduce tasks that can fail without
+ the job being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed reduce-task results
+ in the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum percentage of reduce tasks that can fail without the job
+ being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="prio" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Set {@link JobPriority} for this job.
+
+ @param prio the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link JobPriority} for this job.
+
+ @return the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getProfileEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get whether task profiling is enabled.
+ @return true if some tasks will be profiled]]>
+ </doc>
+ </method>
+ <method name="setProfileEnabled"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the system should collect profiler information for some of
+ the tasks in this job. The information is stored in the user log
+ directory.
+ @param newValue true means it should be gathered]]>
+ </doc>
+ </method>
+ <method name="getProfileParams" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the profiler configuration arguments.
+
+ The default value for this property is
+ "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
+
+ @return the parameters to pass to the task child to configure profiling]]>
+ </doc>
+ </method>
+ <method name="setProfileParams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the profiler configuration arguments. If the string contains a '%s' it
+ will be replaced with the name of the profiling output file when the task
+ runs.
+
+ This value is passed to the task child JVM on the command line.
+
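+ <p>For illustration, a sketch that keeps hprof sampling but trims the
+ options (the exact flags are an assumption, not a recommendation):</p>
+ <p><blockquote><pre>
+ job.setProfileParams("-agentlib:hprof=cpu=samples,depth=6,force=n,thread=y,file=%s");
+ </pre></blockquote></p>
+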
+ @param value the configuration string]]>
+ </doc>
+ </method>
+ <method name="getProfileTaskRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <doc>
+ <![CDATA[Get the range of maps or reduces to profile.
+ @param isMap is the task a map?
+ @return the task ranges]]>
+ </doc>
+ </method>
+ <method name="setProfileTaskRange"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <param name="newValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the ranges of maps or reduces to profile. setProfileEnabled(true)
+ must also be called.
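+
+ <p>For illustration (range strings such as "0-2" follow
+ {@link Configuration.IntegerRanges} syntax):</p>
+ <p><blockquote><pre>
+ job.setProfileEnabled(true);
+ job.setProfileTaskRange(true, "0-2");  // profile map tasks 0, 1 and 2
+ job.setProfileTaskRange(false, "0");   // profile reduce task 0
+ </pre></blockquote></p>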
+ @param newValue a set of integer ranges of the task ids]]>
+ </doc>
+ </method>
+ <method name="setMapDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the map tasks fail.
+
+ <p>The debug script can aid debugging of failed map tasks. The script is
+ given the task's stdout, stderr, syslog and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the map failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script needs to be symlinked. </p>
+
+ <p>Here is an example of how to submit a script:
+ <p><blockquote><pre>
+ job.setMapDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param mDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getMapDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the map task's debug script.
+
+ @return the debug script for the mapred job for failed map tasks.
+ @see #setMapDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="setReduceDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the reduce tasks fail.
+
+ <p>The debug script can aid debugging of failed reduce tasks. The script
+ is given the task's stdout, stderr, syslog and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the reduce failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script file needs to be symlinked. </p>
+
+ <p>Here is an example of how to submit a script:
+ <p><blockquote><pre>
+ job.setReduceDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param rDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getReduceDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reduce task's debug script.
+
+ @return the debug script for the mapred job for failed reduce tasks.
+ @see #setReduceDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="getJobEndNotificationURI" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ @return the job end notification uri, <code>null</code> if it hasn't
+ been set.
+ @see #setJobEndNotificationURI(String)]]>
+ </doc>
+ </method>
+ <method name="setJobEndNotificationURI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ <p>The uri can contain 2 special parameters: <tt>$jobId</tt> and
+ <tt>$jobStatus</tt>. Those, if present, are replaced by the job's
+ identifier and completion-status respectively.</p>
+
+ <p>This is typically used by application-writers to implement chaining of
+ Map-Reduce jobs in an <i>asynchronous manner</i>.</p>
+
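+ <p>For illustration only; the host and path below are placeholders:</p>
+ <p><blockquote><pre>
+ job.setJobEndNotificationURI(
+     "http://myhost:8080/notify?jobid=$jobId&status=$jobStatus");
+ </pre></blockquote></p>
+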
+ @param uri the job end notification uri
+ @see JobStatus
+ @see <a href="{@docRoot}/org/apache/hadoop/mapred/JobClient.html#JobCompletionAndChaining">Job Completion and Chaining</a>]]>
+ </doc>
+ </method>
+ <method name="getJobLocalDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job-specific shared directory for use as scratch space.
+
+ <p>
+ When a job starts, a shared directory is created at
+ <code>${mapred.local.dir}/taskTracker/jobcache/$jobid/work/</code>.
+ This directory is exposed to users through the
+ <code>job.local.dir</code> configuration property, so tasks can use it
+ as scratch space and share files among themselves.</p>
+ This value is also available as a system property.
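+
+ <p>For illustration, a sketch of reading it from within a task:</p>
+ <p><blockquote><pre>
+ String jobLocalDir = job.get("job.local.dir");
+ // also exposed as a system property:
+ String sameDir = System.getProperty("job.local.dir");
+ </pre></blockquote></p>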
+
+ @return The localized job specific shared directory]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A map/reduce job configuration.
+
+ <p><code>JobConf</code> is the primary interface for a user to describe a
+ map-reduce job to the Hadoop framework for execution. The framework tries to
+ faithfully execute the job as described by <code>JobConf</code>; however:
+ <ol>
+ <li>
+ Some configuration parameters might have been marked as
+ <a href="{@docRoot}/org/apache/hadoop/conf/Configuration.html#FinalParams">
+ final</a> by administrators and hence cannot be altered.
+ </li>
+ <li>
+ While some job parameters are straight-forward to set
+ (e.g. {@link #setNumReduceTasks(int)}), other parameters interact subtly
+ with the rest of the framework and/or the job configuration and are
+ relatively more complex for the user to control finely
+ (e.g. {@link #setNumMapTasks(int)}).
+ </li>
+ </ol></p>
+
+ <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner
+ (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and
+ {@link OutputFormat} implementations to be used, etc.</p>
+
+ <p>Optionally <code>JobConf</code> is used to specify other advanced facets
+ of the job such as <code>Comparator</code>s to be used, files to be put in
+ the {@link DistributedCache}, whether or not intermediate and/or job outputs
+ are to be compressed (and how), debuggability via user-provided scripts
+ ({@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)})
+ for post-processing task logs, the task's stdout, stderr, syslog,
+ etc.</p>
+
+ <p>Here is an example on how to configure a job via <code>JobConf</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ FileInputFormat.setInputPaths(job, new Path("in"));
+ FileOutputFormat.setOutputPath(job, new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setCombinerClass(MyJob.MyReducer.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ job.setInputFormat(SequenceFileInputFormat.class);
+ job.setOutputFormat(SequenceFileOutputFormat.class);
+ </pre></blockquote></p>
+
+ @see JobClient
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobConf -->
+ <!-- start interface org.apache.hadoop.mapred.JobConfigurable -->
+ <interface name="JobConfigurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Initializes a new instance from a {@link JobConf}.
+
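+ <p>A minimal sketch of a configurable mapper; the class name and the
+ <code>my.job.threshold</code> property are placeholders:</p>
+ <p><blockquote><pre>
+ public class ThresholdMapper extends MapReduceBase
+     implements Mapper<LongWritable, Text, Text, IntWritable> {
+   private int threshold;
+
+   public void configure(JobConf job) {
+     // read a job-specific setting once, before any calls to map()
+     threshold = job.getInt("my.job.threshold", 10);
+   }
+
+   public void map(LongWritable key, Text value,
+                   OutputCollector<Text, IntWritable> output, Reporter reporter)
+       throws IOException {
+     if (value.getLength() >= threshold) {
+       output.collect(value, new IntWritable(value.getLength()));
+     }
+   }
+ }
+ </pre></blockquote></p>
+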
+ @param job the configuration]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link JobConf}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobConfigurable -->
+ <!-- start class org.apache.hadoop.mapred.JobEndNotifier -->
+ <class name="JobEndNotifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobEndNotifier"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="startNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="stopNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="registerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ <method name="localRunnerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobEndNotifier -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory -->
+ <class name="JobHistory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="hostname" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Initialize JobHistory files.
+ @param conf JobConf of the job tracker.
+ @param hostname jobtracker's hostname
+ @return true if initialized properly,
+ false otherwise]]>
+ </doc>
+ </method>
+ <method name="parseHistoryFromFS"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="l" type="org.apache.hadoop.mapred.JobHistory.Listener"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parses a history file and invokes Listener.handle() for
+ each line of history. It can be used for looking through history
+ files for specific items without having to keep the whole history in memory.
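+
+ A sketch of scanning for job-level records (the path and file-system
+ variables are placeholders):
+ <p><blockquote><pre>
+ JobHistory.parseHistoryFromFS(historyFile, new JobHistory.Listener() {
+   public void handle(JobHistory.RecordTypes recType,
+                      Map<JobHistory.Keys, String> values) throws IOException {
+     if (recType == JobHistory.RecordTypes.Job) {
+       System.out.println(values.get(JobHistory.Keys.JOBID));
+     }
+   }
+ }, fs);
+ </pre></blockquote></p>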
+ @param path path to history file
+ @param l Listener for history events
+ @param fs FileSystem where history file is present
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isDisableHistory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether history is disabled. By default history is enabled,
+ so this method returns false.
+ @return true if history logging is disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="setDisableHistory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="disableHistory" type="boolean"/>
+ <doc>
+ <![CDATA[Enable/disable history logging. Default value is false, so history
+ is enabled by default.
+ @param disableHistory true if history should be disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <field name="JOBTRACKER_START_TIME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provides methods for writing to and reading from job history.
+ Job history works in append mode; JobHistory and its inner classes provide methods
+ to log job events.
+
+ JobHistory is split into multiple files. The format of each file is plain text, where each
+ line is of the form [type (key=value)*] and type identifies the type of the record.
+ Each type maps to one of the inner classes of this class.
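+
+ An illustrative (not verbatim) record line might look like:
+ <p><blockquote><pre>
+ Job JOBID="job_200811101024_0001" JOBNAME="wordcount" USER="alice" SUBMIT_TIME="1226311500000"
+ </pre></blockquote></p>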
+
+ Job history is maintained in a master index which contains start/stop times of all jobs and
+ a few other job-level properties. Apart from this, each job's history is maintained in a
+ separate history file, whose name follows the format jobtrackerId_jobid.
+
+ For parsing the job history, a listener-based interface is supported, where each line is
+ parsed and passed to the listener. The listener can create an object model of the history
+ or look for specific events and discard the rest of the history.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <class name="JobHistory.HistoryCleaner" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobHistory.HistoryCleaner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Cleans up history data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Deletes history files older than one month, updates the master index to remove
+ all jobs older than one month, and removes the reference to any job tracker that has had
+ no jobs in the last month.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <class name="JobHistory.JobInfo" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.JobInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new JobInfo.]]>
+ </doc>
+ </constructor>
+ <method name="getAllTasks" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.Task&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all map and reduce tasks, keyed by task id.]]>
+ </doc>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Get the path of the locally stored job file.
+ @param jobId id of the job
+ @return the path of the job file on the local file system]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFile" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to URL-encode the path of the job-history
+ log file.
+
+ @param logFile path of the job-history file
+ @return URL encoded path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to URL-encode the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL encoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="decodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to URL-decode the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL decoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logSubmitted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="jobConfPath" type="java.lang.String"/>
+ <param name="submitTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="logSubmitted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="jobConfPath" type="java.lang.String"/>
+ <param name="submitTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Log job submitted event to history. Creates a new file in history
+ for the job. If history file creation fails, history is disabled
+ for all subsequent events.
+ @param jobId job id assigned by job tracker.
+ @param jobConf job conf of the job
+ @param jobConfPath path to job conf xml file in HDFS.
+ @param submitTime time when job tracker received the job
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs launch time of job.
+ @param jobId job id, assigned by jobtracker.
+ @param startTime start time of job.
+ @param totalMaps total maps assigned by jobtracker.
+ @param totalReduces total reduces.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <param name="failedMaps" type="int"/>
+ <param name="failedReduces" type="int"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="finishTime" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <param name="failedMaps" type="int"/>
+ <param name="failedReduces" type="int"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log job finished event. Closes the job file in history.
+ @param jobId job id, assigned by jobtracker.
+ @param finishTime finish time of job in ms.
+ @param finishedMaps no. of maps successfully finished.
+ @param finishedReduces no. of reduces successfully finished.
+ @param failedMaps no. of failed map tasks.
+ @param failedReduces no. of failed reduce tasks.
+ @param counters the counters from the job]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs job failed event. Closes the job history log file.
+ @param jobid job id
+ @param timestamp time when job failure was detected in ms.
+ @param finishedMaps no. of finished map tasks.
+ @param finishedReduces no. of finished reduce tasks.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to job start, finish or failure.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <class name="JobHistory.Keys" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Keys&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Keys[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Keys"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Job history files contain key="value" pairs, where keys belong to this enum.
+ It acts as a global namespace for all keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <!-- start interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <interface name="JobHistory.Listener" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="handle"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recType" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"/>
+ <param name="values" type="java.util.Map&lt;org.apache.hadoop.mapred.JobHistory.Keys, java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Callback method for history parser.
+ @param recType type of record, which is the first entry in the line.
+ @param values a map of key-value pairs as they appear in history.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Callback interface for reading back log events from JobHistory. This interface
+ should be implemented and passed to JobHistory.parseHistoryFromFS().]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
+ <class name="JobHistory.MapAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.MapAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of this map task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time of task attempt as reported by task tracker.
+ @param hostName host name of the task attempt.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="host" type="java.lang.String"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finish time of map task attempt.
+ @param taskAttemptId task attempt id
+ @param finishTime finish time
+ @param hostName host name]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="host" type="java.lang.String"/>
+ <param name="err" type="java.lang.String"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt failed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt killed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+ a Map Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
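+ <!-- A minimal usage sketch for the helpers above (the attempt id and host below
+      are hypothetical; in practice the timestamps are reported by the task tracker):
+
+        TaskAttemptID attempt =
+            TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
+        JobHistory.MapAttempt.logStarted(attempt, System.currentTimeMillis(),
+                                         "tracker.example.com");
+        JobHistory.MapAttempt.logFinished(attempt, System.currentTimeMillis(),
+                                          "tracker.example.com");
+ -->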
+ <!-- start class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <class name="JobHistory.RecordTypes" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.RecordTypes&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.RecordTypes[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Record types are identifiers for each line of log in history files.
+ A record type appears as the first token in a single line of log.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
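+ <!-- A sketch of reading the record type back from a history line, assuming the
+      first token is whitespace-delimited (the line content here is hypothetical):
+
+        String line = "MapAttempt TASK_TYPE=\"MAP\"";
+        JobHistory.RecordTypes type =
+            JobHistory.RecordTypes.valueOf(line.split("\\s+", 2)[0]);
+ -->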
+ <!-- start class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
+ <class name="JobHistory.ReduceAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.ReduceAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of Reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time
+ @param hostName host name]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finished event of this task.
+ @param taskAttemptId task attempt id
+ @param shuffleFinished shuffle finish time
+ @param sortFinished sort finish time
+ @param finishTime finish time of task
+ @param hostName host name where task attempt executed]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log failed reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task failed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log killed reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when the task was killed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+ a Reduce Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
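+ <!-- As with map attempts, but logFinished additionally records the shuffle and
+      sort phase end times. A sketch with hypothetical ids and timestamps:
+
+        TaskAttemptID attempt =
+            TaskAttemptID.forName("attempt_200707121733_0003_r_000001_0");
+        long shuffleDone = 1184279853000L, sortDone = 1184279854000L,
+             finished = 1184279855000L;   // milliseconds
+        JobHistory.ReduceAttempt.logFinished(attempt, shuffleDone, sortDone,
+                                             finished, "tracker.example.com");
+ -->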
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Task -->
+ <class name="JobHistory.Task" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.Task"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="splitLocations" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of task (TIP).
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param startTime start time of the tip
+ @param splitLocations locations of the input splits for this task]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finish time of task.
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param finishTime finish time of the task in ms]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task failed event.
+ @param taskId task id
+ @param taskType MAP or REDUCE.
+ @param time timestamp when the failure was detected.
+ @param error error message for failure.]]>
+ </doc>
+ </method>
+ <method name="getTaskAttempts" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.TaskAttempt&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all task attempts for this task, as a map of <task attempt id, TaskAttempt>.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to Task's start, finish or failure.
+ All events logged by this class are logged in a separate file per job in
+ job tracker history. These events map to TIPs in jobtracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Task -->
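+ <!-- A sketch of walking the attempts of a task via getTaskAttempts() (the Task
+      instance is assumed to come from reading back a job history file):
+
+        static void printAttempts(JobHistory.Task task) {
+          for (String attemptId : task.getTaskAttempts().keySet()) {
+            System.out.println("attempt: " + attemptId);
+          }
+        }
+ -->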
+ <!-- start class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <class name="JobHistory.TaskAttempt" extends="org.apache.hadoop.mapred.JobHistory.Task"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.TaskAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Base class for Map and Reduce TaskAttempts.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Values -->
+ <class name="JobHistory.Values" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Values&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Values[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Values"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[This enum contains some of the values commonly used by history log events.
+ Since values in history can only be strings, Values.name() is used in
+ most places in the history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Values -->
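+ <!-- Since history files store only strings, a value is written with name() and
+      recovered with valueOf(). A round-trip sketch (assuming a SUCCESS constant):
+
+        String stored = JobHistory.Values.SUCCESS.name();   // "SUCCESS"
+        JobHistory.Values back = JobHistory.Values.valueOf(stored);
+ -->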
+ <!-- start class org.apache.hadoop.mapred.JobID -->
+ <class name="JobID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobID" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a JobID object
+ @param jtIdentifier jobTracker identifier
+ @param id job number]]>
+ </doc>
+ </constructor>
+ <method name="getJtIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare JobIDs first by jtIdentifier, then by job number]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a JobID object from a given string
+ @return constructed JobID object or null if the given string is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getJobIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches job IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>any job</i>
+ run on the jobtracker started at <i>200707121733</i>, we would use:
+ <pre>
+ JobID.getJobIDsPattern("200707121733", null);
+ </pre>
+ which will return:
+ <pre> "job_200707121733_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @return a regex pattern matching JobIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[JobID represents the immutable and unique identifier for
+ the job. A JobID consists of two parts. The first part
+ is the jobtracker identifier, which defines the jobID to jobtracker
+ mapping. In a cluster setup this string is the jobtracker
+ start time; in the local setting it is "local".
+ The second part of the JobID is the job number. <br>
+ An example JobID is
+ <code>job_200707121733_0003</code>, which represents the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse JobID strings, but rather
+ use appropriate constructors or {@link #forName(String)} method.
+
+ @see TaskID
+ @see TaskAttemptID
+ @see JobTracker#getNewJobId()
+ @see JobTracker#getStartTime()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobID -->
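+ <!-- Following the class doc above: parse ids with forName(String) rather than
+      hand-building strings, and use getJobIDsPattern for matching. A sketch:
+
+        JobID id = JobID.forName("job_200707121733_0003");
+        String pattern = JobID.getJobIDsPattern("200707121733", null);
+        boolean matches = id.toString().matches(pattern);   // true
+ -->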
+ <!-- start class org.apache.hadoop.mapred.JobPriority -->
+ <class name="JobPriority" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobPriority&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobPriority[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Used to describe the priority of the running job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobPriority -->
+ <!-- start class org.apache.hadoop.mapred.JobProfile -->
+ <class name="JobProfile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobProfile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an empty {@link JobProfile}.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, org.apache.hadoop.mapred.JobID, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a {@link JobProfile} from the userid, jobid,
+ job config-file, job-details url and job name.
+
+ @param user userid of the person who submitted the job.
+ @param jobid id of the job.
+ @param jobFile job configuration file.
+ @param url link to the web-ui for details of the job.
+ @param name user-specified job name.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="use JobProfile(String, JobID, String, String, String) instead">
+ <doc>
+ <![CDATA[@deprecated use JobProfile(String, JobID, String, String, String) instead]]>
+ </doc>
+ </constructor>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user id.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job id.]]>
+ </doc>
+ </method>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID() instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID() instead]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configuration file for the job.]]>
+ </doc>
+ </method>
+ <method name="getURL" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the link to the web-ui for details of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A JobProfile is a MapReduce primitive. Tracks a job,
+ whether living or dead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobProfile -->
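+ <!-- JobProfile is Writable, so it can round-trip through Hadoop's I/O buffers.
+      A sketch with made-up job details, using org.apache.hadoop.io buffers:
+
+        JobProfile profile = new JobProfile("alice",
+            JobID.forName("job_200707121733_0003"), "/jobs/job.xml",
+            "http://jobtracker:50030/jobdetails.jsp", "word count");
+        DataOutputBuffer out = new DataOutputBuffer();
+        profile.write(out);
+        DataInputBuffer in = new DataInputBuffer();
+        in.reset(out.getData(), out.getLength());
+        JobProfile copy = new JobProfile();
+        copy.readFields(in);
+ -->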
+ <!-- start class org.apache.hadoop.mapred.JobShell -->
+ <class name="JobShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run method from Tool]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Provides command line parsing for job submission, which looks like:
+ hadoop jar -libjars <comma separated jars> -archives <comma separated archives>
+ -files <comma separated files> inputjar args]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobShell -->
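+ <!-- JobShell implements Tool, so it is normally driven through ToolRunner. A
+      sketch (the jar names and arguments below are placeholders):
+
+        int exitCode = ToolRunner.run(new JobShell(), new String[] {
+            "-libjars", "dep1.jar,dep2.jar", "myjob.jar", "arg1" });
+        System.exit(exitCode);
+ -->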
+ <!-- start class org.apache.hadoop.mapred.JobStatus -->
+ <class name="JobStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobStatus" type="java.lang.String, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job]]>
+ </doc>
+ </constructor>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID instead]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The jobid of the Job]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in maps]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in reduce]]>
+ </doc>
+ </method>
+ <method name="getRunState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return running state of the job]]>
+ </doc>
+ </method>
+ <method name="setRunState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Change the current run state of the job.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return start time of the job]]>
+ </doc>
+ </method>
+ <method name="getUsername" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the username of the job]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SUCCEEDED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PREP" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Describes the current status of a job. This is
+ not intended to be a comprehensive piece of data.
+ For that, look at JobProfile.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobStatus -->
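+ <!-- A sketch of interpreting a JobStatus with the run-state constants above
+      (the status object is assumed to come from the JobTracker):
+
+        static boolean isComplete(JobStatus status) {
+          int state = status.getRunState();
+          return state == JobStatus.SUCCEEDED || state == JobStatus.FAILED;
+        }
+ -->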
+ <!-- start class org.apache.hadoop.mapred.JobTracker -->
+ <class name="JobTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.InterTrackerProtocol"/>
+ <implements name="org.apache.hadoop.mapred.JobSubmissionProtocol"/>
+ <method name="startTracker" return="org.apache.hadoop.mapred.JobTracker"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker with given configuration.
+
+ The conf will be modified to reflect the actual ports on which
+ the JobTracker is up and running if the user passes the port as
+ <code>zero</code>.
+
+ @param conf configuration for the JobTracker.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Run forever]]>
+ </doc>
+ </method>
+ <method name="getTotalSubmissions" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobTrackerMachine" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTrackerIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the unique identifier (i.e. the timestamp) of this job tracker start.
+ @return a string with a unique identifier]]>
+ </doc>
+ </method>
+ <method name="getTrackerPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="runningJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRunningJobs" return="java.util.List&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version that is called from a timer thread, and therefore needs to be
+ careful to synchronize.]]>
+ </doc>
+ </method>
+ <method name="failedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="completedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="taskTrackers" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskTracker" return="org.apache.hadoop.mapred.TaskTrackerStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trackerID" type="java.lang.String"/>
+ </method>
+ <method name="resolveAndAddToTopology" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getNodesAtMaxLevel" return="java.util.Collection&lt;org.apache.hadoop.net.Node&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a collection of nodes at the max level]]>
+ </doc>
+ </method>
+ <method name="getParentNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <param name="level" type="int"/>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the Node in the network topology that corresponds to the hostname]]>
+ </doc>
+ </method>
+ <method name="getNumTaskCacheLevels" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumResolvedTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="heartbeat" return="org.apache.hadoop.mapred.HeartbeatResponse"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskTrackerStatus"/>
+ <param name="initialContact" type="boolean"/>
+ <param name="acceptNewTasks" type="boolean"/>
+ <param name="responseId" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The periodic heartbeat mechanism between the {@link TaskTracker} and
+ the {@link JobTracker}.
+
+ The {@link JobTracker} processes the status information sent by the
+ {@link TaskTracker} and responds with instructions to start/stop
+ tasks or jobs, and also 'reset' instructions during contingencies.]]>
+ </doc>
+ </method>
+ <method name="getFilesystemName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab the local fs name]]>
+ </doc>
+ </method>
+ <method name="reportTaskTrackerError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTracker" type="java.lang.String"/>
+ <param name="errorClass" type="java.lang.String"/>
+ <param name="errorMessage" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNewJobId" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Allocates a new JobID.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[JobTracker.submitJob() kicks off a new job.
+
+ Create a 'JobInProgress' object, which contains both JobProfile
+ and JobStatus. Those two sub-objects are sometimes shipped outside
+ of the JobTracker. But JobInProgress adds info that's useful for
+ the JobTracker alone.
+
+ We add the JIP to the jobInitQueue, which is processed
+ asynchronously to handle split-computation and build up
+ the right TaskTracker/Block mapping.]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="fromid" type="int"/>
+ <param name="maxevents" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxEvents" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="tipid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the diagnostics for a given task
+ @param taskId the id of the task
+ @return an array of the diagnostic messages]]>
+ </doc>
+ </method>
+ <method name="getTip" return="org.apache.hadoop.mapred.TaskInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tipid" type="org.apache.hadoop.mapred.TaskID"/>
+ <doc>
+ <![CDATA[Returns specified TaskInProgress, or null.]]>
+ </doc>
+ </method>
+ <method name="killTask" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="killTask" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a Task to be killed]]>
+ </doc>
+ </method>
+ <method name="getAssignedTracker" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ </method>
+ <method name="getAssignedTracker" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Get tracker name for a given task id.
+ @param taskId the id of the task attempt
+ @return The name of the task tracker]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSystemDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir()]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.JobInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.JobInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Get the localized job file path on the job tracker's local file system
+ @param jobId id of the job
+ @return the path of the job conf file on the local file system]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker process. This is used only for debugging. As a rule,
+ JobTracker should be run as part of the DFS Namenode process.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[JobTracker is the central location for submitting and
+ tracking MR jobs in a network environment.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker -->
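+ <!-- An embedding sketch based on startTracker/offerService above (normally the
+      daemon is launched via main; configuration details are assumed):
+
+        JobConf conf = new JobConf();
+        JobTracker tracker = JobTracker.startTracker(conf);
+        tracker.offerService();   // blocks, serving heartbeats, until stopped
+ -->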
+ <!-- start class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <class name="JobTracker.IllegalStateException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobTracker.IllegalStateException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A client tried to submit a job before the Job Tracker was ready.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <!-- start class org.apache.hadoop.mapred.JobTracker.State -->
+ <class name="JobTracker.State" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobTracker.State&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobTracker.State[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.State -->
+ <!-- start class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+ <class name="KeyValueLineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="findSeparator" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="sep" type="byte"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a key/value pair from a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class treats a line in the input as a key/value pair separated by a
+ separator character. The separator can be specified in the config file
+ under the attribute name key.value.separator.in.input.line. The default
+ separator is the tab character ('\t').]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
+ <class name="KeyValueTextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="KeyValueTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either a linefeed or a carriage return is used to signal the end of a line. Each line
+ is divided into key and value parts by a separator byte. If no such byte
+ exists, the key will be the entire line and the value will be empty.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
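+ <!-- Wiring the format into a job, using the separator attribute named in the
+      reader's doc above (the comma separator here is just an example):
+
+        JobConf conf = new JobConf();
+        conf.setInputFormat(KeyValueTextInputFormat.class);
+        conf.set("key.value.separator.in.input.line", ",");  // default is '\t'
+ -->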
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader -->
+ <class name="LineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="LineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.LongWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.LongWritable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the progress within the split]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[Treats keys as byte offsets in the file and values as lines.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
+ <class name="LineRecordReader.LineReader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LineRecordReader.LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ <code>io.file.buffer.size</code> specified in the given
+ <code>Configuration</code>.
+ @param in input stream
+ @param conf configuration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the underlying stream.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <param name="maxBytesToConsume" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @param maxLineLength the maximum number of bytes to store into str.
+ @param maxBytesToConsume the maximum number of bytes to consume in this call.
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @param maxLineLength the maximum number of bytes to store into str.
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides a line reader from an input stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
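+  <!-- A minimal usage sketch for LineRecordReader.LineReader, using only the
+       constructor and readLine overloads listed above; the input path is
+       hypothetical. readLine returns the number of bytes consumed, so 0
+       signals end of stream.
+
+       InputStream in = new FileInputStream("input.txt");  // hypothetical file
+       LineRecordReader.LineReader reader =
+           new LineRecordReader.LineReader(in, new Configuration());
+       Text line = new Text();
+       while (reader.readLine(line) > 0) {
+         System.out.println(line);    // process one line per iteration
+       }
+       reader.close();                // closes the underlying stream
+  -->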
+ <!-- start class org.apache.hadoop.mapred.MapFileOutputFormat -->
+ <class name="MapFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.MapFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getEntry" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readers" type="org.apache.hadoop.io.MapFile.Reader[]"/>
+ <param name="partitioner" type="org.apache.hadoop.mapred.Partitioner&lt;K, V&gt;"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get an entry from output generated by this class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link MapFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapFileOutputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.Mapper -->
+ <interface name="Mapper" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1"/>
+ <param name="value" type="V1"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Maps a single input key/value pair into an intermediate key/value pair.
+
+ <p>Output pairs need not be of the same types as input pairs. A given
+ input pair may map to zero or many output pairs. Output pairs are
+ collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes an insignificant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has
+ timed out and kill that task. Another way to avoid this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the input key.
+ @param value the input value.
+ @param output collects mapped keys and values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Maps input key/value pairs to a set of intermediate key/value pairs.
+
+ <p>Maps are the individual tasks which transform input records into
+ intermediate records. The transformed intermediate records need not be of
+ the same type as the input records. A given input pair may map to zero or
+ many output pairs.</p>
+
+ <p>The Hadoop Map-Reduce framework spawns one map task for each
+ {@link InputSplit} generated by the {@link InputFormat} for the job.
+ <code>Mapper</code> implementations can access the {@link JobConf} for the
+ job via the {@link JobConfigurable#configure(JobConf)} method and initialize
+ themselves. Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p>The framework then calls
+ {@link #map(Object, Object, OutputCollector, Reporter)}
+ for each key/value pair in the <code>InputSplit</code> for that task.</p>
+
+ <p>All intermediate values associated with a given output key are
+ subsequently grouped by the framework, and passed to a {@link Reducer} to
+ determine the final output. Users can control the grouping by specifying
+ a <code>Comparator</code> via
+ {@link JobConf#setOutputKeyComparatorClass(Class)}.</p>
+
+ <p>The grouped <code>Mapper</code> outputs are partitioned per
+ <code>Reducer</code>. Users can control which keys (and hence records) go to
+ which <code>Reducer</code> by implementing a custom {@link Partitioner}.
+
+ <p>Users can optionally specify a <code>combiner</code>, via
+ {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the
+ intermediate outputs, which helps to cut down the amount of data transferred
+ from the <code>Mapper</code> to the <code>Reducer</code>.
+
+ <p>The intermediate, grouped outputs are always stored in
+ {@link SequenceFile}s. Applications can specify if and how the intermediate
+ outputs are to be compressed and which {@link CompressionCodec}s are to be
+ used via the <code>JobConf</code>.</p>
+
+ <p>If the job has
+ <a href="{@docRoot}/org/apache/hadoop/mapred/JobConf.html#ReducerNone">zero
+ reduces</a> then the output of the <code>Mapper</code> is directly written
+ to the {@link FileSystem} without grouping by keys.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyMapper&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Mapper&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String mapTaskId;
+ private String inputFile;
+ private int noRecords = 0;
+
+ public void configure(JobConf job) {
+ mapTaskId = job.get("mapred.task.id");
+ inputFile = job.get("mapred.input.file");
+ }
+
+ public void map(K key, V val,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ reporter.progress();
+
+ // Process some more
+ // ...
+ // ...
+
+ // Increment the no. of &lt;key, value&gt; pairs processed
+ ++noRecords;
+
+ // Increment counters
+ reporter.incrCounter(MyCounters.NUM_RECORDS, 1);
+
+ // Every 100 records update application-level status
+ if ((noRecords%100) == 0) {
+ reporter.setStatus(mapTaskId + " processed " + noRecords +
+ " from input-file: " + inputFile);
+ }
+
+ // Output the result
+ output.collect(key, val);
+ }
+ }
+ </pre></blockquote></p>
+
+ <p>Applications may write a custom {@link MapRunnable} to exert greater
+ control on map processing e.g. multi-threaded <code>Mapper</code>s etc.</p>
+
+ @see JobConf
+ @see InputFormat
+ @see Partitioner
+ @see Reducer
+ @see MapReduceBase
+ @see MapRunnable
+ @see SequenceFile]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Mapper -->
+ <!-- start class org.apache.hadoop.mapred.MapReduceBase -->
+ <class name="MapReduceBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="MapReduceBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for {@link Mapper} and {@link Reducer} implementations.
+
+ <p>Provides default no-op implementations for a few methods; most non-trivial
+ applications need to override some of them.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapReduceBase -->
+ <!-- start interface org.apache.hadoop.mapred.MapRunnable -->
+ <interface name="MapRunnable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start mapping input <tt>&lt;key, value&gt;</tt> pairs.
+
+ <p>Mapping of input records to output records is complete when this method
+ returns.</p>
+
+ @param input the {@link RecordReader} to read the input records.
+ @param output the {@link OutputCollector} to collect the output records.
+ @param reporter {@link Reporter} to report progress, status-updates etc.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Expert: Generic interface for {@link Mapper}s.
+
+ <p>Custom implementations of <code>MapRunnable</code> can exert greater
+ control on map processing e.g. multi-threaded, asynchronous mappers etc.</p>
+
+ @see Mapper]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.MapRunnable -->
+ <!-- start class org.apache.hadoop.mapred.MapRunner -->
+ <class name="MapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Default {@link MapRunnable} implementation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapRunner -->
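+  <!-- A sketch of the read/map loop that a MapRunnable.run() performs; the
+       default MapRunner behaves along these lines, but this is not its actual
+       source. Note that next() refills the same key/value objects each call.
+
+       private Mapper<K1, V1, K2, V2> mapper;  // obtained in configure(JobConf)
+
+       public void run(RecordReader<K1, V1> input,
+                       OutputCollector<K2, V2> output,
+                       Reporter reporter) throws IOException {
+         K1 key = input.createKey();
+         V1 value = input.createValue();
+         while (input.next(key, value)) {
+           mapper.map(key, value, output, reporter);
+         }
+         mapper.close();
+       }
+  -->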
+ <!-- start class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <class name="MultiFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultiFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[An abstract {@link InputFormat} that returns {@link MultiFileSplit}s
+ from the {@link #getSplits(JobConf, int)} method. Splits are constructed from
+ the files under the input paths. Each returned split contains a <i>nearly</i>
+ equal total content length. <br>
+ Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)}
+ to construct <code>RecordReader</code>s for <code>MultiFileSplit</code>s.
+ @see MultiFileSplit]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.MultiFileSplit -->
+ <class name="MultiFileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="MultiFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLengths" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array containing the lengths of the files in
+ the split]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the length of the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getNumPaths" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all the Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+      <![CDATA[A sub-collection of input files. Unlike {@link FileSplit}, the MultiFileSplit
+ class does not represent a split of a single file, but a split of the input files
+ into smaller sets. The atomic unit of splitting is a file. <br>
+ MultiFileSplit can be used to implement {@link RecordReader}s that
+ read one record per file.
+ @see FileSplit
+ @see MultiFileInputFormat]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileSplit -->
+ <!-- start interface org.apache.hadoop.mapred.OutputCollector -->
+ <interface name="OutputCollector" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="collect"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Adds a key/value pair to the output.
+
+ @param key the key to collect.
+ @param value the value to collect.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Collects the <code>&lt;key, value&gt;</code> pairs output by {@link Mapper}s
+ and {@link Reducer}s.
+
+ <p><code>OutputCollector</code> is the generalization of the facility
+ provided by the Map-Reduce framework to collect data output by either the
+ <code>Mapper</code> or the <code>Reducer</code> i.e. intermediate outputs
+ or the output of the job.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputCollector -->
+ <!-- start interface org.apache.hadoop.mapred.OutputFormat -->
+ <interface name="OutputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordWriter} for the given job.
+
+ @param ignored
+ @param job configuration for the job whose output is being written.
+ @param name the unique name for this part of the output.
+ @param progress mechanism for reporting progress while writing to file.
+ @return a {@link RecordWriter} to write the output for the job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check for validity of the output-specification for the job.
+
+ <p>This validates the output specification for the job when the job is
+ submitted. Typically it checks that the output does not already exist,
+ throwing an exception when it does, so that existing output is not
+ overwritten.</p>
+
+ @param ignored
+ @param job job configuration.
+ @throws IOException when output should not be attempted]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputFormat</code> describes the output-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the output-specification of the job. For example, check that the
+ output directory doesn't already exist.
+ </li>
+ <li>
+ Provide the {@link RecordWriter} implementation to be used to write out
+ the output files of the job. Output files are stored in a
+ {@link FileSystem}.
+ </li>
+ </ol>
+
+ @see RecordWriter
+ @see JobConf]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.OutputFormatBase -->
+ <class name="OutputFormatBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat}">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="OutputFormatBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for {@link OutputFormat}.
+ @deprecated Use {@link FileOutputFormat}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputFormatBase -->
+ <!-- start class org.apache.hadoop.mapred.OutputLogFilter -->
+ <class name="OutputLogFilter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.PathFilter"/>
+ <constructor name="OutputLogFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <doc>
+      <![CDATA[This class filters log files out of a given directory listing.
+ It doesn't accept paths containing _logs.
+ It can be used to list the paths of an output directory as follows:
+ Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
+ new OutputLogFilter()));]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputLogFilter -->
+ <!-- start interface org.apache.hadoop.mapred.Partitioner -->
+ <interface name="Partitioner" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numPartitions" type="int"/>
+ <doc>
+      <![CDATA[Get the partition number for a given key (hence record) given the total
+ number of partitions, i.e. the number of reduce-tasks for the job.
+
+ <p>Typically a hash function on all or a subset of the key.</p>
+
+ @param key the key to be partitioned.
+ @param value the entry value.
+ @param numPartitions the total number of partitions.
+ @return the partition number for the <code>key</code>.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partitions the key space.
+
+ <p><code>Partitioner</code> controls the partitioning of the keys of the
+ intermediate map-outputs. The key (or a subset of the key) is used to derive
+ the partition, typically by a hash function. The total number of partitions
+ is the same as the number of reduce tasks for the job. Hence this controls
+ which of the <code>m</code> reduce tasks the intermediate key (and hence the
+ record) is sent for reduction.</p>
+
+ @see Reducer]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Partitioner -->
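+  <!-- A sketch of a hash-based Partitioner, the usual default strategy.
+       Masking with Integer.MAX_VALUE keeps the result non-negative even for
+       negative hash codes.
+
+       public class HashingPartitioner<K2, V2> implements Partitioner<K2, V2> {
+         public void configure(JobConf job) { }  // no configuration needed
+         public int getPartition(K2 key, V2 value, int numPartitions) {
+           return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
+         }
+       }
+  -->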
+ <!-- start interface org.apache.hadoop.mapred.RecordReader -->
+ <interface name="RecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the next key/value pair from the input for processing.
+
+ @param key the key to read data into
+ @param value the value to read data into
+ @return true iff a key/value was read, false if at EOF]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a key.
+
+ @return a new key object.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a value.
+
+ @return a new value object.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current position in the input.
+
+ @return the current position in the input.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Close this {@link RecordReader} to future operations.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[How much of the input has the {@link RecordReader} consumed,
+ i.e. how much of the input has been processed?
+
+ @return progress from <code>0.0</code> to <code>1.0</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordReader</code> reads &lt;key, value&gt; pairs from an
+ {@link InputSplit}.
+
+ <p><code>RecordReader</code>, typically, converts the byte-oriented view of
+ the input, provided by the <code>InputSplit</code>, and presents a
+ record-oriented view for the {@link Mapper} & {@link Reducer} tasks for
+ processing. It thus assumes the responsibility of processing record
+ boundaries and presenting the tasks with keys and values.</p>
+
+ @see InputSplit
+ @see InputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordReader -->
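+  <!-- The consumption loop implied by this interface: allocate the key and
+       value once, let next() refill them, and close the reader when done.
+       process() is a hypothetical handler.
+
+       RecordReader<K, V> reader = ...;  // obtained from an InputFormat
+       K key = reader.createKey();
+       V value = reader.createValue();
+       try {
+         while (reader.next(key, value)) {
+           process(key, value);
+         }
+       } finally {
+         reader.close();
+       }
+  -->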
+ <!-- start interface org.apache.hadoop.mapred.RecordWriter -->
+ <interface name="RecordWriter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes a key/value pair.
+
+ @param key the key to write.
+ @param value the value to write.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this <code>RecordWriter</code> to future operations.
+
+ @param reporter facility to report progress.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordWriter</code> writes the output &lt;key, value&gt; pairs
+ to an output file.
+
+ <p><code>RecordWriter</code> implementations write the job outputs to the
+ {@link FileSystem}.
+
+ @see OutputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordWriter -->
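+  <!-- A minimal RecordWriter sketch, purely to show the write/close contract;
+       the tab-separated text form is an arbitrary choice for illustration.
+
+       public class TextPairWriter<K, V> implements RecordWriter<K, V> {
+         private final java.io.PrintWriter out;
+         public TextPairWriter(java.io.PrintWriter out) { this.out = out; }
+         public void write(K key, V value) throws IOException {
+           out.println(key + "\t" + value);  // one key/value pair per line
+         }
+         public void close(Reporter reporter) throws IOException {
+           out.close();
+         }
+       }
+  -->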
+ <!-- start interface org.apache.hadoop.mapred.Reducer -->
+ <interface name="Reducer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="values" type="java.util.Iterator&lt;V2&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K3, V3&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<i>Reduces</i> values for a given key.
+
+ <p>The framework calls this method for each
+ <code>&lt;key, (list of values)></code> pair in the grouped inputs.
+ Output values must be of the same type as input values. Input keys must
+ not be altered. The framework will <b>reuse</b> the key and value objects
+ that are passed into the reduce; therefore, applications should clone
+ the objects they want to keep a copy of. In many cases, all values are
+ combined into zero or one value.
+ </p>
+
+ <p>Output pairs are collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes an insignificant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has
+ timed out and kill that task. Another way to avoid this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the key.
+ @param values the list of values to reduce.
+ @param output to collect keys and combined values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reduces a set of intermediate values which share a key to a smaller set of
+ values.
+
+ <p>The number of <code>Reducer</code>s for the job is set by the user via
+ {@link JobConf#setNumReduceTasks(int)}. <code>Reducer</code> implementations
+ can access the {@link JobConf} for the job via the
+ {@link JobConfigurable#configure(JobConf)} method and initialize themselves.
+ Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p><code>Reducer</code> has 3 primary phases:</p>
+ <ol>
+ <li>
+
+ <h4 id="Shuffle">Shuffle</h4>
+
+ <p>The input to the <code>Reducer</code> is the grouped output of a {@link Mapper}.
+ In this phase the framework, for each <code>Reducer</code>, fetches the
+ relevant partition of the output of all the <code>Mapper</code>s, via HTTP.
+ </p>
+ </li>
+
+ <li>
+ <h4 id="Sort">Sort</h4>
+
+ <p>The framework groups <code>Reducer</code> inputs by <code>key</code>s
+ (since different <code>Mapper</code>s may have output the same key) in this
+ stage.</p>
+
+ <p>The shuffle and sort phases occur simultaneously i.e. while outputs are
+ being fetched they are merged.</p>
+
+ <h5 id="SecondarySort">SecondarySort</h5>
+
+ <p>If equivalence rules for keys while grouping the intermediates are
+ different from those for grouping keys before reduction, then one may
+ specify a <code>Comparator</code> via
+ {@link JobConf#setOutputValueGroupingComparator(Class)}. Since
+ {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to
+ control how intermediate keys are grouped, these can be used in conjunction
+ to simulate <i>secondary sort on values</i>.</p>
+
+
+ For example, say that you want to find duplicate web pages and tag them
+ all with the url of the "best" known example. You would set up the job
+ like:
+ <ul>
+ <li>Map Input Key: url</li>
+ <li>Map Input Value: document</li>
+ <li>Map Output Key: document checksum, url pagerank</li>
+ <li>Map Output Value: url</li>
+ <li>Partitioner: by checksum</li>
+ <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
+ <li>OutputValueGroupingComparator: by checksum</li>
+ </ul>
+ </li>
+
+ <li>
+ <h4 id="Reduce">Reduce</h4>
+
+ <p>In this phase the
+ {@link #reduce(Object, Iterator, OutputCollector, Reporter)}
+ method is called for each <code>&lt;key, (list of values)></code> pair in
+ the grouped inputs.</p>
+ <p>The output of the reduce task is typically written to the
+ {@link FileSystem} via
+ {@link OutputCollector#collect(Object, Object)}.</p>
+ </li>
+ </ol>
+
+ <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyReducer&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Reducer&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String reduceTaskId;
+ private int noKeys = 0;
+
+ public void configure(JobConf job) {
+ reduceTaskId = job.get("mapred.task.id");
+ }
+
+ public void reduce(K key, Iterator&lt;V&gt; values,
+ OutputCollector&lt;K, V&gt; output,
+ Reporter reporter)
+ throws IOException {
+
+ // Process
+ int noValues = 0;
+ while (values.hasNext()) {
+ V value = values.next();
+
+ // Increment the no. of values for this key
+ ++noValues;
+
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ if ((noValues%10) == 0) {
+ reporter.progress();
+ }
+
+ // Process some more
+ // ...
+ // ...
+
+ // Output the &lt;key, value&gt;
+ output.collect(key, value);
+ }
+
+ // Increment the no. of &lt;key, list of values&gt; pairs processed
+ ++noKeys;
+
+ // Increment counters
+ reporter.incrCounter(MyCounters.NUM_RECORDS, 1);
+
+ // Every 100 keys update application-level status
+ if ((noKeys%100) == 0) {
+ reporter.setStatus(reduceTaskId + " processed " + noKeys);
+ }
+ }
+ }
+ </pre></blockquote></p>
+
+ @see Mapper
+ @see Partitioner
+ @see Reporter
+ @see MapReduceBase]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reducer -->
+ <!-- start interface org.apache.hadoop.mapred.Reporter -->
+ <interface name="Reporter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Progressable"/>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the status description for the task.
+
+ @param status brief description of the current status.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the key, which can be of
+ any {@link Enum} type, by the specified amount.
+
+ @param key key to identify the counter to be incremented. The key can
+ be any <code>Enum</code>.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the group and counter name
+ by the specified amount.
+
+ @param group name to identify the group of the counter to be incremented.
+ @param counter name to identify the counter within the group.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="getInputSplit" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+ <doc>
+ <![CDATA[Get the {@link InputSplit} object for a map.
+
+ @return the <code>InputSplit</code> that the map is reading from.
+ @throws UnsupportedOperationException if called outside a mapper]]>
+ </doc>
+ </method>
+ <field name="NULL" type="org.apache.hadoop.mapred.Reporter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A constant of Reporter type that does nothing.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A facility for Map-Reduce applications to report progress and update
+ counters, status information etc.
+
+ <p>{@link Mapper} and {@link Reducer} can use the <code>Reporter</code>
+ provided to report progress or just indicate that they are alive. In
+ scenarios where the application takes an insignificant amount of time to
+ process individual key/value pairs, this is crucial since the framework
+ might assume that the task has timed out and kill that task.</p>
+
+ <p>Applications can also update {@link Counters} via the provided
+ <code>Reporter</code>.</p>
+
+ @see Progressable
+ @see Counters]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reporter -->
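+  <!-- Typical Reporter use inside a map() or reduce() call, combining the
+       methods listed above; the counter enum and the running count n are
+       hypothetical.
+
+       enum RecordCounters { GOOD, BAD }
+       ...
+       reporter.incrCounter(RecordCounters.GOOD, 1);
+       reporter.incrCounter("Parse", "errors", 0);
+       reporter.setStatus("processed " + n + " records");
+       reporter.progress();  // inherited from Progressable: signal liveness
+  -->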
+ <!-- start interface org.apache.hadoop.mapred.RunningJob -->
+ <interface name="RunningJob" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job identifier.
+
+ @return the job identifier.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.">
+ <doc>
+ <![CDATA[@deprecated This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the job.
+
+ @return the name of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the path of the submitted job configuration.
+
+ @return the path of the submitted job configuration.]]>
+ </doc>
+ </method>
+ <method name="getTrackingURL" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the URL where some job progress information will be displayed.
+
+ @return the URL where some job progress information will be displayed.]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's map-tasks, as a float between 0.0
+ and 1.0. When all map tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's map-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0
+ and 1.0. When all reduce tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's reduce-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isComplete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job is finished or not.
+ This is a non-blocking call.
+
+ @return <code>true</code> if the job is complete, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isSuccessful" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job completed successfully.
+
+ @return <code>true</code> if the job succeeded, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="waitForCompletion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Blocks until the job is complete.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill the running job. Blocks until all job tasks have been
+ killed as well. If the job is no longer running, it simply returns.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="startFrom" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get events indicating completion (success/failure) of component tasks.
+
+ @param startFrom index to start fetching events from
+ @return an array of {@link TaskCompletionEvent}s
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill indicated task attempt.
+
+ @param taskId the id of the task to be terminated.
+ @param shouldFail if true the task is failed and added to the failed tasks
+ list; otherwise it is just killed, without affecting
+ job failure status.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #killTask(TaskAttemptID, boolean)}">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the counters for this job.
+
+ @return the counters for this job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RunningJob</code> is the user-interface to query for details on a
+ running Map-Reduce job.
+
+ <p>Clients can get hold of <code>RunningJob</code> via the {@link JobClient}
+ and then query the running-job for details such as name, configuration,
+ progress etc.</p>
+
+ @see JobClient]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RunningJob -->
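+  <!-- A polling sketch built on the methods above. Obtaining the RunningJob
+       through JobClient.submitJob(JobConf) is an assumption drawn from the
+       JobClient API rather than from this entry.
+
+       RunningJob job = new JobClient(conf).submitJob(conf);
+       while (!job.isComplete()) {
+         System.out.printf("map %.0f%% reduce %.0f%%%n",
+             job.mapProgress() * 100, job.reduceProgress() * 100);
+         Thread.sleep(5000);  // throws InterruptedException; handle in real code
+       }
+       System.out.println(job.isSuccessful() ? "job succeeded" : "job failed");
+  -->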
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <class name="SequenceFileAsBinaryInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[InputFormat reading keys, values from SequenceFiles in binary (raw)
+ format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <class name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"/>
+ <constructor name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the key class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the value class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.BytesWritable"/>
+ <param name="val" type="org.apache.hadoop.io.BytesWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read raw bytes from a SequenceFile.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Read records from a SequenceFile as binary (raw) bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
+ <class name="SequenceFileAsBinaryOutputFormat" extends="org.apache.hadoop.mapred.SequenceFileOutputFormat&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setSequenceFileOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the {@link SequenceFile}
+ <p>This allows the user to specify the key class to be different
+ from the actual class ({@link BytesWritable}) used for writing </p>
+
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output key class.]]>
+ </doc>
+ </method>
+ <method name="setSequenceFileOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for the {@link SequenceFile}
+ <p>This allows the user to specify the value class to be different
+ from the actual class ({@link BytesWritable}) used for writing </p>
+
+ @param conf the {@link JobConf} to modify
+      @param theClass the SequenceFile output value class.]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the key class for the {@link SequenceFile}
+
+ @return the key class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the value class for the {@link SequenceFile}
+
+ @return the value class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes keys, values to
+      {@link SequenceFile}s in binary (raw) format]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
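+  <!-- Usage sketch (illustrative), assuming a JobConf named conf as in the driver
+       sketch above; the "logical" key/value classes chosen here are hypothetical.
+
+         conf.setOutputFormat(SequenceFileAsBinaryOutputFormat.class);
+         // The reducer emits raw BytesWritable pairs, but the SequenceFile header
+         // records these classes for later readers:
+         SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(
+             conf, org.apache.hadoop.io.Text.class);
+         SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(
+             conf, org.apache.hadoop.io.IntWritable.class);
+  -->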
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <class name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" type="org.apache.hadoop.io.BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.BytesWritable"/>
+ </method>
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Inner class used for appendRaw]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+ <class name="SequenceFileAsTextInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[This class is similar to SequenceFileInputFormat, except that it generates
+      SequenceFileAsTextRecordReader, which converts the input keys and values to their String forms by calling the toString() method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
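+  <!-- Usage sketch (illustrative), assuming a JobConf named conf as above:
+
+         conf.setInputFormat(SequenceFileAsTextInputFormat.class);
+         // The mapper then sees Text/Text pairs: the toString() form of the
+         // original keys and values stored in the SequenceFiles.
+  -->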
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <class name="SequenceFileAsTextRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="SequenceFileAsTextRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read the next key/value pair, converted to Text.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[This class converts the input keys and values to their String forms by calling
+      the toString() method. This class is to SequenceFileAsTextInputFormat what
+      LineRecordReader is to TextInputFormat.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+ <class name="SequenceFileInputFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a record reader for the given split
+ @param split file split
+ @param job job configuration
+ @param reporter reporter who sends report to task tracker
+ @return RecordReader]]>
+ </doc>
+ </method>
+ <method name="setFilterClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="filterClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[set the filter class
+
+ @param conf application configuration
+ @param filterClass filter class]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that allows a map/red job to work on a sample of sequence files.
+ The sample is decided by the filter class set by the job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter -->
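+  <!-- Usage sketch (illustrative), assuming a JobConf named conf as above. The
+       filter class must be one of the Filter implementations described below:
+
+         conf.setInputFormat(SequenceFileInputFilter.class);
+         SequenceFileInputFilter.setFilterClass(
+             conf, SequenceFileInputFilter.PercentFilter.class);
+         // Filter-specific settings (frequency, pattern) are configured through
+         // the chosen filter's own static setters; see the sketches below.
+  -->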
+ <!-- start interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <interface name="SequenceFileInputFilter.Filter" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Filter function: decide whether a record should be accepted.
+      @param key record key
+      @return true if the record is accepted; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[filter interface]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <class name="SequenceFileInputFilter.FilterBase" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.SequenceFileInputFilter.Filter"/>
+ <constructor name="SequenceFileInputFilter.FilterBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[base class for Filters]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
+ <class name="SequenceFileInputFilter.MD5Filter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.MD5Filter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+ <![CDATA[set the filtering frequency in configuration
+
+ @param conf configuration
+ @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[configure the filter according to configuration
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Filtering method: if MD5(key) % frequency == 0, return true; otherwise return false.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[This class returns a set of records by examining the MD5 digest of its
+      key against a filtering frequency <i>f</i>. The filtering criterion is
+      MD5(key) % f == 0.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
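+  <!-- Usage sketch (illustrative), assuming conf as above; the frequency is arbitrary:
+
+         SequenceFileInputFilter.setFilterClass(
+             conf, SequenceFileInputFilter.MD5Filter.class);
+         // Keeps only records whose key satisfies MD5(key) % 32 == 0,
+         // i.e. a pseudo-random 1-in-32 sample keyed on content.
+         SequenceFileInputFilter.MD5Filter.setFrequency(conf, 32);
+  -->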
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <class name="SequenceFileInputFilter.PercentFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.PercentFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+      <![CDATA[Set the frequency and store it in conf
+      @param conf configuration
+      @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[configure the filter by checking the configuration
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Filtering method: if record# % frequency == 0, return true; otherwise return false.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[This class returns a percentage of records.
+      The percentage is determined by a filtering frequency <i>f</i> using
+ the criteria record# % f == 0.
+ For example, if the frequency is 10, one out of 10 records is returned.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
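+  <!-- Usage sketch (illustrative), assuming conf as above:
+
+         SequenceFileInputFilter.setFilterClass(
+             conf, SequenceFileInputFilter.PercentFilter.class);
+         SequenceFileInputFilter.PercentFilter.setFrequency(conf, 10);
+         // Unlike MD5Filter this samples by position: records 0, 10, 20, and so on pass.
+  -->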
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
+ <class name="SequenceFileInputFilter.RegexFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.RegexFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPattern"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="regex" type="java.lang.String"/>
+ <exception name="PatternSyntaxException" type="java.util.regex.PatternSyntaxException"/>
+ <doc>
+      <![CDATA[Define the filtering regex and store it in conf
+ @param conf where the regex is set
+ @param regex regex used as a filter]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[configure the Filter by checking the configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filtering method
+ If key matches the regex, return true; otherwise return false
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Filters records by matching the key against a regex]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
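+  <!-- Usage sketch (illustrative), assuming conf as above; the pattern is hypothetical:
+
+         SequenceFileInputFilter.setFilterClass(
+             conf, SequenceFileInputFilter.RegexFilter.class);
+         // Keys are matched via key.toString() against the regex:
+         SequenceFileInputFilter.RegexFilter.setPattern(conf, "^user_.*");
+  -->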
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="listPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFormat -->
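+  <!-- Usage sketch (illustrative), assuming conf as above; the path is hypothetical:
+
+         conf.setInputFormat(SequenceFileInputFormat.class);
+         FileInputFormat.setInputPaths(conf, new Path("/data/seq"));
+         // The mapper's input key/value types must match the key/value classes
+         // recorded in the SequenceFile headers.
+  -->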
+ <!-- start class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
+ <class name="SequenceFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.SequenceFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf}
+ @return the {@link CompressionType} for the output {@link SequenceFile},
+ defaulting to {@link CompressionType#RECORD}]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf} to modify
+ @param style the {@link CompressionType} for the output
+ {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
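+  <!-- Usage sketch (illustrative), assuming conf as above; classes and path are hypothetical:
+
+         conf.setOutputFormat(SequenceFileOutputFormat.class);
+         conf.setOutputKeyClass(org.apache.hadoop.io.Text.class);
+         conf.setOutputValueClass(org.apache.hadoop.io.IntWritable.class);
+         // BLOCK compression groups many records per compressed block; the
+         // per-record alternative is CompressionType.RECORD (the default).
+         SequenceFileOutputFormat.setOutputCompressionType(
+             conf, org.apache.hadoop.io.SequenceFile.CompressionType.BLOCK);
+         FileOutputFormat.setOutputPath(conf, new Path("/out/seq"));
+  -->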
+ <!-- start class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+ <class name="SequenceFileRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <constructor name="SequenceFileRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of key that must be passed to {@link
+      #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of value that must be passed to {@link
+      #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="conf" type="org.apache.hadoop.conf.Configuration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[A {@link RecordReader} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileRecordReader -->
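+  <!-- Usage sketch (illustrative): reading one file split directly, inside a method
+       that throws IOException. The path is hypothetical, and the file's key/value
+       classes are assumed to be Text/Text.
+
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.fs.Path;
+         import org.apache.hadoop.io.Text;
+         import org.apache.hadoop.mapred.FileSplit;
+         import org.apache.hadoop.mapred.SequenceFileRecordReader;
+
+         Configuration conf = new Configuration();
+         Path file = new Path("/data/seq/part-00000");
+         long len = file.getFileSystem(conf).getFileStatus(file).getLen();
+         FileSplit split = new FileSplit(file, 0, len, (String[]) null);
+         SequenceFileRecordReader<Text, Text> reader =
+             new SequenceFileRecordReader<Text, Text>(conf, split);
+         Text key = reader.createKey();
+         Text value = reader.createValue();
+         while (reader.next(key, value)) {
+           // getProgress() runs from 0.0 to 1.0 over the split's byte range
+           System.out.println(reader.getProgress() + " " + key);
+         }
+         reader.close();
+  -->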
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer -->
+ <class name="StatusHttpServer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer" type="java.lang.String, java.lang.String, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Create a status server on the given port.
+      The jsp scripts are taken from src/webapps/<name>.
+      @param name The name of the server
+      @param bindAddress The address to bind to
+      @param port The port to use on the server
+      @param findPort whether the server should start at the given port and
+      increment by 1 until it finds a free port.]]>
+ </doc>
+ </constructor>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Set a value in the webapp context. These values are available to the jsp
+ pages as "application.getAttribute(name)".
+ @param name The name of the attribute
+ @param value The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="addServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="servletClass" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[Add a servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param servletClass The servlet class]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value in the webapp context.
+ @param name The name of the attribute
+ @return The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the port that the server is on
+ @return the port]]>
+ </doc>
+ </method>
+ <method name="setThreads"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="min" type="int"/>
+ <param name="max" type="int"/>
+ </method>
+ <method name="addSslListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="keystore" type="java.lang.String"/>
+ <param name="storPass" type="java.lang.String"/>
+ <param name="keyPass" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Configure an ssl listener on the server.
+ @param addr address to listen on
+ @param keystore location of the keystore
+ @param storPass password for the keystore
+ @param keyPass password for the key]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start the server. Does not wait for the server to start.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[stop the server]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Create a Jetty embedded server to answer http requests. The primary goal
+ is to serve up status information for the server.
+ There are three contexts:
+ "/logs/" -> points to the log directory
+ "/static/" -> points to common static files (src/webapps/static)
+ "/" -> the jsp server code from (src/webapps/<name>)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer -->
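+  <!-- Usage sketch (illustrative); the name, bind address, port and the
+       trackerInstance placeholder object are all hypothetical:
+
+         StatusHttpServer server = new StatusHttpServer("job", "0.0.0.0", 50030, true);
+         // Values set here are visible to JSPs via application.getAttribute(name):
+         server.setAttribute("job.tracker", trackerInstance);
+         server.addServlet("stacks", "/stacks", StatusHttpServer.StackServlet.class);
+         server.start();
+         // With findPort=true the actual port may differ from the requested one:
+         int port = server.getPort();
+  -->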
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer.StackServlet -->
+ <class name="StatusHttpServer.StackServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer.StackServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[A very simple servlet to serve up a text representation of the current
+      stack traces. It both returns the stacks to the caller and logs them.
+      The stack traces are captured sequentially, so they are not guaranteed
+      to form a single consistent snapshot.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer.StackServlet -->
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <class name="StatusHttpServer.TaskGraphServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer.TaskGraphServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="width" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[width of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="height" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[height of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="ymargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on y axis]]>
+ </doc>
+ </field>
+ <field name="xmargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on x axis]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The servlet that outputs svg graphics for map / reduce task
+ statuses]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskAttemptID -->
+ <class name="TaskAttemptID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskAttemptID" type="org.apache.hadoop.mapred.TaskID, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from given {@link TaskID}.
+ @param taskId TaskID that this task belongs to
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskAttemptID" type="java.lang.String, int, boolean, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Constructs a TaskAttemptID object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param taskId taskId number
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link TaskID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskAttemptID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+      <![CDATA[Compare TaskAttemptIDs first by tipIds, then by task attempt numbers.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+      <![CDATA[Construct a TaskAttemptID object from a given string
+ @return constructed TaskAttemptID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <param name="attemptId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task attempt IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example to obtain a regex matching <i>all task attempt IDs</i>
+      of <i>any jobtracker</i>, in <i>any job</i>, of the <i>first
+      map task</i>, we would use:
+      <pre>
+      TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+      </pre>
+      which will return:
+      <pre> "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @param attemptId the task attempt number, or null
+ @return a regex pattern matching TaskAttemptIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[TaskAttemptID represents the immutable and unique identifier for
+ a task attempt. Each task attempt is one particular instance of a Map or
+ Reduce Task identified by its TaskID.
+
+      TaskAttemptID consists of 2 parts. The first part is the
+      {@link TaskID} that this TaskAttemptID belongs to.
+      The second part is the task attempt number. <br>
+ An example TaskAttemptID is :
+ <code>attempt_200707121733_0003_m_000005_0</code> , which represents the
+ zeroth task attempt for the fifth map task in the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+      Applications should never construct or parse TaskAttemptID strings,
+      but rather use the appropriate constructors or the {@link #forName(String)}
+      method.
+
+ @see JobID
+ @see TaskID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskAttemptID -->
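+  <!-- Usage sketch (illustrative), parsing the example id given in the doc above:
+
+         TaskAttemptID attempt =
+             TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
+         attempt.getJobID();    // job_200707121733_0003
+         attempt.getTaskID();   // task_200707121733_0003_m_000005
+         attempt.isMap();       // true
+         // Regex for every attempt of the first map task, in any job:
+         String p = TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+  -->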
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent -->
+ <class name="TaskCompletionEvent" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskCompletionEvent"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor for Writable.]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskCompletionEvent" type="int, java.lang.String, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TaskCompletionEvent" type="int, org.apache.hadoop.mapred.TaskAttemptID, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor. eventId should be created externally and incremented
+ per event for each job.
+      @param eventId event id; event ids should be unique and assigned
+      incrementally, starting from 0.
+ @param taskId task id
+ @param status task's status
+ @param taskTrackerHttp task tracker's host:port for http.]]>
+ </doc>
+ </constructor>
+ <method name="getEventId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns event Id.
+ @return event id]]>
+ </doc>
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskAttemptId()} instead.">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id
+ @deprecated use {@link #getTaskAttemptId()} instead.]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptId" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id]]>
+ </doc>
+ </method>
+ <method name="getTaskStatus" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the task completion status, e.g. Status.SUCCEEDED or Status.FAILED.
+ @return task tracker status]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerHttp" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[http location of the tasktracker where this task ran.
+ @return http location of tasktracker user logs]]>
+ </doc>
+ </method>
+ <method name="getTaskRunTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns time (in milliseconds) the task took to complete.]]>
+ </doc>
+ </method>
+ <method name="setTaskRunTime"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskCompletionTime" type="int"/>
+ <doc>
+ <![CDATA[Set the task completion time
+      @param taskCompletionTime time (in milliseconds) the task took to complete]]>
+ </doc>
+ </method>
+ <method name="setEventId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="eventId" type="int"/>
+ <doc>
+      <![CDATA[Set event id. Event ids should be assigned incrementally, starting from 0.
+ @param eventId]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setTaskID(TaskAttemptID)} instead.">
+ <param name="taskId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId
+ @deprecated use {@link #setTaskID(TaskAttemptID)} instead.]]>
+ </doc>
+ </method>
+ <method name="setTaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId]]>
+ </doc>
+ </method>
+ <method name="setTaskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"/>
+ <doc>
+ <![CDATA[Set task status.
+ @param status]]>
+ </doc>
+ </method>
+ <method name="setTaskTrackerHttp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTrackerHttp" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set task tracker http location.
+ @param taskTrackerHttp]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isMapTask" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="idWithinJob" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="EMPTY_ARRAY" type="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[This is used to track task completion events on
+      the job tracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent -->
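+  <!-- Usage sketch (illustrative): completion events are normally produced by the
+       framework and fetched from a running job (via RunningJob.getTaskCompletionEvents,
+       which is outside this record), then inspected with the getters above:
+
+         for (TaskCompletionEvent ev : running.getTaskCompletionEvents(0)) {
+           System.out.println(ev.getTaskAttemptId() + " " + ev.getTaskStatus()
+               + " " + ev.getTaskRunTime() + "ms " + ev.getTaskTrackerHttp());
+         }
+  -->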
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <class name="TaskCompletionEvent.Status" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskCompletionEvent.Status&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <!-- start class org.apache.hadoop.mapred.TaskID -->
+ <class name="TaskID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskID" type="org.apache.hadoop.mapred.JobID, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from given {@link JobID}.
+ @param jobId JobID that this tip belongs to
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskID" type="java.lang.String, int, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Constructs a TaskID object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this tip belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+      <![CDATA[Compare TaskIDs first by jobIds, then by tip numbers. Reduces are
+      defined as greater than maps.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+      <![CDATA[Construct a TaskID object from a given string
+ @return constructed TaskID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getTaskIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example to obtain a regex matching <i>the first map task</i>
+      of <i>any jobtracker</i>, of <i>any job</i>, we would use:
+      <pre>
+      TaskID.getTaskIDsPattern(null, null, true, 1);
+      </pre>
+      which will return:
+      <pre> "task_[^_]*_[0-9]*_m_000001" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @return a regex pattern matching TaskIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[TaskID represents the immutable and unique identifier for
+ a Map or Reduce Task. Each TaskID encompasses multiple attempts made to
+      execute the Map or Reduce Task, each of which is uniquely identified by
+      its TaskAttemptID.
+
+      TaskID consists of 3 parts. The first part is the {@link JobID} that this
+      TaskInProgress belongs to. The second part of the TaskID is either 'm' or 'r',
+      representing whether the task is a map task or a reduce task.
+      The third part is the task number. <br>
+ An example TaskID is :
+ <code>task_200707121733_0003_m_000005</code> , which represents the
+ fifth map task in the third job running at the jobtracker
+ started at <code>200707121733</code>.
+ <p>
+      Applications should never construct or parse TaskID strings,
+      but rather use the appropriate constructors or the {@link #forName(String)}
+      method.
+
+ @see JobID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskID -->
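+  <!-- Usage sketch (illustrative), parsing the example id given in the doc above:
+
+         TaskID tip = TaskID.forName("task_200707121733_0003_m_000005");
+         tip.getJobID();   // job_200707121733_0003
+         tip.isMap();      // true
+         // Regex for the first map task of any job on any jobtracker:
+         String p = TaskID.getTaskIDsPattern(null, null, true, 1);
+  -->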
+ <!-- start class org.apache.hadoop.mapred.TaskLog -->
+ <class name="TaskLog" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLog"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskLogFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="getTaskLogFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logsRetainHours" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Purge old user logs.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskLogLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the desired maximum length of a task's logs.
+ @param conf the job to look in
+ @return the number of bytes to cap the log files at]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ If the tailLength is 0, the entire output will be saved.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="setup" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ Setup commands such as setting memory limit can be passed which
+ will be executed before exec.
+ If the tailLength is 0, the entire output will be saved.
+ @param setup The setup commands for the execed process.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="addCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="isExecutable" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add quotes to each of the command strings and
+ return them as a single string.
+ @param cmd The command to be quoted
+ @param isExecutable if true, the first argument is treated as an
+ executable and converted to a shell path
+ @return the quoted string
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="captureDebugOut" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="debugoutFilename" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture debug script's
+ stdout and stderr to debugout.
+ @param cmd The command and the arguments that should be run
+ @param debugoutFilename The filename that stdout and stderr
+ should be saved to.
+ @return the modified command that should be run
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple logger to handle the task-specific user logs.
+ This class uses the system property <code>hadoop.log.dir</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog -->
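+ <!-- A small sketch of captureOutAndError, which wraps a command in a
+      shell so its streams land in files; the command and file names are
+      hypothetical, and tailLength 0 keeps the entire output:
+
+      import java.io.File;
+      import java.util.Arrays;
+      import java.util.List;
+      import org.apache.hadoop.mapred.TaskLog;
+
+      public class CaptureDemo {
+        public static void main(String[] args) throws Exception {
+          List<String> cmd = Arrays.asList("echo", "hello");
+          List<String> wrapped = TaskLog.captureOutAndError(
+              cmd, new File("stdout.log"), new File("stderr.log"), 0L);
+          System.out.println(wrapped); // the modified command to exec
+        }
+      }
+ -->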
+ <!-- start class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <class name="TaskLog.LogName" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskLog.LogName&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskLog.LogName[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskLog.LogName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The filter for userlogs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <!-- start class org.apache.hadoop.mapred.TaskLogAppender -->
+ <class name="TaskLogAppender" extends="org.apache.log4j.FileAppender"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogAppender"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="activateOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Getter/Setter methods for log4j.]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ </method>
+ <method name="getTotalLogFileSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setTotalLogFileSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logSize" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[A simple log4j-appender for the task child's
+ map-reduce system logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogAppender -->
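+ <!-- A hedged sketch of configuring the appender programmatically via the
+      getter/setter pairs above; normally log4j wires these from properties,
+      and the task id and size below are made-up values:
+
+      import org.apache.hadoop.mapred.TaskLogAppender;
+
+      public class AppenderDemo {
+        public static void main(String[] args) {
+          TaskLogAppender appender = new TaskLogAppender();
+          appender.setTaskId("attempt_200707121733_0003_m_000005_0");
+          appender.setTotalLogFileSize(4096); // cap; units per framework config
+          appender.activateOptions(); // standard log4j post-configuration hook
+        }
+      }
+ -->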
+ <!-- start class org.apache.hadoop.mapred.TaskLogServlet -->
+ <class name="TaskLogServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the logs via http.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A servlet that is run by the TaskTrackers to provide the task logs via http.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogServlet -->
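+ <!-- The servlet serves logs over http from the tasktracker's web server;
+      below is a hedged sketch of fetching them as a client. The /tasklog
+      path, the taskid/filter query parameters, host, and port are all
+      assumptions for illustration:
+
+      import java.io.BufferedReader;
+      import java.io.InputStreamReader;
+      import java.net.URL;
+
+      public class LogFetchDemo {
+        public static void main(String[] args) throws Exception {
+          URL url = new URL("http://tracker:50060/tasklog"
+              + "?taskid=attempt_200707121733_0003_m_000005_0&filter=stdout");
+          BufferedReader in = new BufferedReader(
+              new InputStreamReader(url.openStream()));
+          for (String line; (line = in.readLine()) != null; ) {
+            System.out.println(line);
+          }
+          in.close();
+        }
+      }
+ -->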
+ <!-- start class org.apache.hadoop.mapred.TaskReport -->
+ <class name="TaskReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskID()} instead">
+ <doc>
+ <![CDATA[@deprecated use {@link #getTaskID()} instead]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The id of the task.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The amount completed, between zero and one.]]>
+ </doc>
+ </method>
+ <method name="getState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The most recent state, reported by a {@link Reporter}.]]>
+ </doc>
+ </method>
+ <method name="getDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A list of error messages.]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A table of counters.]]>
+ </doc>
+ </method>
+ <method name="getFinishTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the finish time of the task.
+ @return 0 if the finish time was not set, else the finish time.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start time of the task.
+ @return 0 if the start time was not set, else the start time.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A report on the state of a task.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskReport -->
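+ <!-- A small sketch of reading a TaskReport's fields; how the report is
+      obtained (typically from the job client) is outside this class, so
+      the method below only consumes one:
+
+      import org.apache.hadoop.mapred.TaskReport;
+
+      public class ReportDemo {
+        static void print(TaskReport report) {
+          System.out.println(report.getTaskID()
+              + " progress=" + report.getProgress()   // 0.0 to 1.0
+              + " state=" + report.getState());
+          for (String diag : report.getDiagnostics()) {
+            System.out.println("  " + diag);          // error messages, if any
+          }
+        }
+      }
+ -->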
+ <!-- start class org.apache.hadoop.mapred.TaskTracker -->
+ <class name="TaskTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.TaskUmbilicalProtocol"/>
+ <implements name="java.lang.Runnable"/>
+ <constructor name="TaskTracker" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start with the local machine name and the default JobTracker.]]>
+ </doc>
+ </constructor>
+ <method name="getTaskTrackerMetrics" return="org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cleanupStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Removes all contents of temporary storage. Called upon
+ startup, to remove any leftovers from a previous run.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close down the TaskTracker and all its components. We must also shut down
+ any running tasks or threads, and clean up disk space. A new TaskTracker
+ within the same process space might be restarted, so everything must be
+ clean.]]>
+ </doc>
+ </method>
+ <method name="getJobClient" return="org.apache.hadoop.mapred.InterTrackerProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The connection to the JobTracker, used by the TaskRunner
+ for locating remote files.]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerReportAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the address to which the tasktracker is bound.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The server retry loop.
+ This while-loop attempts to connect to the JobTracker. It only
+ loops when the old TaskTracker has gone bad (its state is
+ stale somehow) and we need to reinitialize everything.]]>
+ </doc>
+ </method>
+ <method name="getTask" return="org.apache.hadoop.mapred.Task"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTask" return="org.apache.hadoop.mapred.Task"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called upon startup by the child process, to fetch Task data.]]>
+ </doc>
+ </method>
+ <method name="statusUpdate" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="status" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="statusUpdate" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskStatus" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called periodically to report Task progress, from 0.0 to 1.0.]]>
+ </doc>
+ </method>
+ <method name="reportDiagnosticInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="info" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportDiagnosticInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="info" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when the task dies before completion, and we want to report back
+ diagnostic info.]]>
+ </doc>
+ </method>
+ <method name="ping" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ping" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Child checking to see if we're alive. Normally does nothing.]]>
+ </doc>
+ </method>
+ <method name="done"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="shouldPromote" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="done"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldPromote" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The task is done.]]>
+ </doc>
+ </method>
+ <method name="shuffleError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="shuffleError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A reduce-task failed to shuffle the map-outputs. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="fsError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="fsError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A child task had a local filesystem error. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="getMapCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="fromid" type="int"/>
+ <param name="maxlocs" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMapCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxLocs" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mapOutputLost"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mapOutputLost"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="errorMsg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A completed map task's output has been lost.]]>
+ </doc>
+ </method>
+ <method name="isIdle" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this task tracker idle?
+ @return has this task tracker finished and cleaned up all of its tasks?]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Start the TaskTracker, pointing at the indicated JobTracker.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[TaskTracker is a process that starts and tracks MR Tasks
+ in a networked environment. It contacts the JobTracker
+ for Task assignments and reporting results.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker -->
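+ <!-- A hedged sketch of the lifecycle implied above: construct with a
+      JobConf (local machine name, default JobTracker) and enter the server
+      retry loop via the Runnable contract. Real deployments launch
+      TaskTracker.main rather than embedding the tracker like this:
+
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.TaskTracker;
+
+      public class TrackerDemo {
+        public static void main(String[] args) throws Exception {
+          TaskTracker tracker = new TaskTracker(new JobConf());
+          new Thread(tracker).start(); // run() loops, reconnecting as needed
+        }
+      }
+ -->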
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.Child -->
+ <class name="TaskTracker.Child" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskTracker.Child"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ </method>
+ <doc>
+ <![CDATA[The main() for child processes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.Child -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <class name="TaskTracker.MapOutputServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskTracker.MapOutputServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in TaskTracker's Jetty to serve the map outputs
+ to other nodes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics -->
+ <class name="TaskTracker.TaskTrackerMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics -->
+ <!-- start class org.apache.hadoop.mapred.TextInputFormat -->
+ <class name="TextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="TextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage return is used to signal end of line. Keys are
+ the position in the file, and values are the line of text.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat -->
+ <class name="TextOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes plain text files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+ <class name="TextOutputFormat.LineRecordWriter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"/>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
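+ <!-- A minimal sketch wiring the two text formats above into a job; the
+      input and output paths are hypothetical:
+
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.FileInputFormat;
+      import org.apache.hadoop.mapred.FileOutputFormat;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.TextInputFormat;
+      import org.apache.hadoop.mapred.TextOutputFormat;
+
+      public class TextIoDemo {
+        public static void main(String[] args) {
+          JobConf conf = new JobConf();
+          conf.setInputFormat(TextInputFormat.class);   // keys: offsets, values: lines
+          conf.setOutputFormat(TextOutputFormat.class); // writes key, tab, value
+          FileInputFormat.setInputPaths(conf, new Path("in"));
+          FileOutputFormat.setOutputPath(conf, new Path("out"));
+        }
+      }
+ -->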
+</package>
+<package name="org.apache.hadoop.mapred.jobcontrol">
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.Job -->
+ <class name="Job" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf, java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+ @param jobConf a mapred job configuration representing a job to be executed.
+ @param dependingJobs a list of jobs the current job depends on]]>
+ </doc>
+ </constructor>
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+
+ @param jobConf mapred job configuration representing a job to be executed.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job name of this job]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job name for this job.
+ @param jobName the job name]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job ID of this job assigned by JobControl]]>
+ </doc>
+ </method>
+ <method name="setJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job ID for this job.
+ @param id the job ID]]>
+ </doc>
+ </method>
+ <method name="getMapredJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getAssignedJobID()} instead">
+ <doc>
+ <![CDATA[@return the mapred ID of this job
+ @deprecated use {@link #getAssignedJobID()} instead]]>
+ </doc>
+ </method>
+ <method name="setMapredJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setAssignedJobID(JobID)} instead">
+ <param name="mapredJobID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job.
+ @param mapredJobID the mapred job ID for this job.
+ @deprecated use {@link #setAssignedJobID(JobID)} instead]]>
+ </doc>
+ </method>
+ <method name="getAssignedJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred ID of this job as assigned by the
+ mapred framework.]]>
+ </doc>
+ </method>
+ <method name="setAssignedJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mapredJobID" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job as assigned by the
+ mapred framework.
+ @param mapredJobID the mapred job ID for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred job conf of this job]]>
+ </doc>
+ </method>
+ <method name="setJobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Set the mapred job conf for this job.
+ @param jobConf the mapred job conf for this job.]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the state of this job]]>
+ </doc>
+ </method>
+ <method name="setState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Set the state for this job.
+ @param state the new state for this job.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the message of this job]]>
+ </doc>
+ </method>
+ <method name="setMessage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="message" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the message for this job.
+ @param message the message for this job.]]>
+ </doc>
+ </method>
+ <method name="getDependingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the depending jobs of this job]]>
+ </doc>
+ </method>
+ <method name="addDependingJob" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dependingJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a job to this job's dependency list. Dependent jobs can only be added while a Job
+ is waiting to run, not during or afterwards.
+
+ @param dependingJob Job that this Job depends on.
+ @return <tt>true</tt> if the Job was added.]]>
+ </doc>
+ </method>
+ <method name="isCompleted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in a complete state]]>
+ </doc>
+ </method>
+ <method name="isReady" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in READY state]]>
+ </doc>
+ </method>
+ <method name="submit"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Submit this job to mapred. The state becomes RUNNING if submission
+ is successful, FAILED otherwise.]]>
+ </doc>
+ </method>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WAITING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEPENDENT_FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class encapsulates a MapReduce job and its dependencies. It monitors
+ the states of the depending jobs and updates the state of this job.
+ A job starts in the WAITING state. If it does not have any depending jobs, or
+ all of its depending jobs are in the SUCCESS state, then the job state becomes
+ READY. If any depending job fails, the job will fail too.
+ When in the READY state, the job can be submitted to Hadoop for execution, with
+ the state changing to RUNNING. From the RUNNING state, the job moves into the
+ SUCCESS or FAILED state, depending on the outcome of the job execution.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.Job -->
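+ <!-- A short sketch of the dependency wiring described above; the two
+      JobConfs are assumed to be fully configured elsewhere:
+
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.jobcontrol.Job;
+
+      public class DependencyDemo {
+        public static void main(String[] args) throws Exception {
+          Job first = new Job(new JobConf());
+          Job second = new Job(new JobConf());
+          // second stays WAITING until first reaches SUCCESS, then turns READY.
+          second.addDependingJob(first);
+        }
+      }
+ -->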
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.JobControl -->
+ <class name="JobControl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobControl" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a job control for a group of jobs.
+ @param groupName a name identifying this group]]>
+ </doc>
+ </constructor>
+ <method name="getWaitingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the waiting state]]>
+ </doc>
+ </method>
+ <method name="getRunningJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the running state]]>
+ </doc>
+ </method>
+ <method name="getReadyJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the ready state]]>
+ </doc>
+ </method>
+ <method name="getSuccessfulJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the success state]]>
+ </doc>
+ </method>
+ <method name="getFailedJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="addJob" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="aJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a new job.
+ @param aJob the new job]]>
+ </doc>
+ </method>
+ <method name="addJobs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobs" type="java.util.Collection&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"/>
+ <doc>
+ <![CDATA[Add a collection of jobs.
+
+ @param jobs the jobs to add]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the thread state]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set the thread state to STOPPING so that the
+ thread will stop when it wakes up.]]>
+ </doc>
+ </method>
+ <method name="suspend"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Suspend the running thread.]]>
+ </doc>
+ </method>
+ <method name="resume"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resume the suspended thread.]]>
+ </doc>
+ </method>
+ <method name="allFinished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The main loop for the thread.
+ The loop does the following:
+ Check the states of the running jobs
+ Update the states of waiting jobs
+ Submit the jobs in ready state]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a set of MapReduce jobs and their dependencies. It tracks
+ the states of the jobs by placing them into different tables according to their
+ states.
+
+ This class provides APIs for the client app to add a job to the group and to get
+ the jobs in the group in different states. When a
+ job is added, an ID unique to the group is assigned to the job.
+
+ This class has a thread that submits jobs when they become ready, monitors the
+ states of the running jobs, and updates the states of jobs based on the state
+ changes of their depending jobs. The class provides APIs for suspending,
+ resuming, and stopping the thread.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.JobControl -->
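+ <!-- A hedged sketch of driving a JobControl group from a client thread,
+      following the Runnable contract above; the jobs are the ones wired in
+      the previous sketch and the group name is arbitrary:
+
+      import org.apache.hadoop.mapred.jobcontrol.Job;
+      import org.apache.hadoop.mapred.jobcontrol.JobControl;
+
+      public class GroupDemo {
+        public static void run(Job first, Job second) throws Exception {
+          JobControl control = new JobControl("demo");
+          control.addJob(first);
+          control.addJob(second);
+          new Thread(control).start();     // submits READY jobs, tracks states
+          while (!control.allFinished()) {
+            Thread.sleep(5000);            // poll until all jobs complete or fail
+          }
+          control.stop();                  // thread exits when it next wakes up
+        }
+      }
+ -->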
+</package>
+<package name="org.apache.hadoop.mapred.join">
+ <!-- start class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+ <class name="ArrayListBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="ArrayListBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayListBackedIterator" type="java.util.ArrayList&lt;X&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. The
+ implementation uses an {@link java.util.ArrayList} to store elements
+ added to it, replaying them as requested.
+ Prefer {@link StreamBackedIterator}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
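+ <!-- A small sketch of the ResetableIterator contract implemented above:
+      values are copied into a caller-supplied object, and reset() rewinds
+      so the same elements can be replayed:
+
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.mapred.join.ArrayListBackedIterator;
+
+      public class IterDemo {
+        public static void main(String[] args) throws Exception {
+          ArrayListBackedIterator<Text> iter = new ArrayListBackedIterator<Text>();
+          iter.add(new Text("a"));
+          iter.add(new Text("b"));
+          Text val = new Text();
+          while (iter.hasNext()) {
+            iter.next(val);             // copy the next element into val
+            System.out.println(val);
+          }
+          iter.reset();                 // rewind for another pass
+          iter.close();
+        }
+      }
+ -->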
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <interface name="ComposableInputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Refinement of InputFormat requiring implementors to provide
+ ComposableRecordReader instead of RecordReader.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <interface name="ComposableRecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <implements name="java.lang.Comparable&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key this RecordReader would supply on a call to next(K,V)]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RecordReader into the object provided.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the stream is not empty, but provides no guarantee that
+ a call to next(K,V) will succeed.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[While key-value pairs from this RecordReader match the given key, register
+ them with the JoinCollector provided.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Additional operations required of a RecordReader to participate in a join.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputFormat -->
+ <class name="CompositeInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="CompositeInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Interpret a given string as a composite expression.
+ {@code
+ func ::= <ident>([<func>,]*<func>)
+ func ::= tbl(<class>,"<path>")
+ class ::= @see java.lang.Class#forName(java.lang.String)
+ path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
+ }
+ Reads expression from the <tt>mapred.join.expr</tt> property and
+ user-supplied join types from <tt>mapred.join.define.&lt;ident&gt;</tt>
+ types. Paths supplied to <tt>tbl</tt> are given as input paths to the
+ InputFormat class listed.
+ @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]>
+ </doc>
+ </method>
+ <method name="addDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds the default set of identifiers to the parser.]]>
+ </doc>
+ </method>
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify that this composite has children and that all its children
+ can validate their input.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a CompositeInputSplit from the child InputFormats by assigning the
+ ith split from each child to the ith composite split.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a CompositeRecordReader for the children of this InputFormat
+ as defined in the init expression.
+ The outermost join need only be composable, not necessarily a composite.
+ Mandating TupleWritable isn't strictly correct.]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given an InputFormat class (inf) and a path (p), return:
+ {@code tbl(<inf>, <p>) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given an operation (op), an InputFormat class (inf), and a set of paths (p), return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), InputFormat class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat capable of performing joins over a set of data sources sorted
+ and partitioned the same way.
+ @see #setFormat
+
+ A user may define new join types by setting the property
+ <tt>mapred.join.define.&lt;ident&gt;</tt> to a classname. In the expression
+ <tt>mapred.join.expr</tt>, the identifier will be assumed to be a
+ ComposableRecordReader.
+ <tt>mapred.join.keycomparator</tt> can be a classname used to compare keys
+ in the join.
+ @see JoinRecordReader
+ @see MultiFilterRecordReader]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputFormat -->
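+ <!-- A minimal usage sketch of the expression grammar and compose() methods
+      described above. The job setup, SequenceFileInputFormat choice, and
+      input paths are illustrative assumptions, not part of the generated
+      description:
+
+        JobConf job = new JobConf();
+        job.setInputFormat(CompositeInputFormat.class);
+        job.set("mapred.join.expr", CompositeInputFormat.compose(
+            "inner", SequenceFileInputFormat.class, "/data/a", "/data/b"));
+
+      Here compose() expands to
+      inner(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat,"/data/a"),
+            tbl(org.apache.hadoop.mapred.SequenceFileInputFormat,"/data/b")),
+      and both inputs must be sorted and identically partitioned. -->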
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputSplit -->
+ <class name="CompositeInputSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="CompositeInputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CompositeInputSplit" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.mapred.InputSplit"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an InputSplit to this collection.
+ @throws IOException If capacity was not specified during construction
+ or if capacity has been reached.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the aggregate length of all child InputSplits currently added.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the length of ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Collect a set of hosts from all child InputSplits.]]>
+ </doc>
+ </method>
+ <method name="getLocation" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Return the locations of the ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write splits in the following format.
+ {@code
+ <count><class1><class2>...<classn><split1><split2>...<splitn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}
+ @throws IOException If the child InputSplit cannot be read, typically
+ for failing access checks.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This InputSplit contains a set of child InputSplits. Any InputSplit inserted
+ into this collection must have a public default constructor.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputSplit -->
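+ <!-- A short sketch of building such a split; the capacity and the child
+      splits are illustrative assumptions:
+
+        CompositeInputSplit composite = new CompositeInputSplit(2);
+        composite.add(splitFromSourceA);    // occupies position 0
+        composite.add(splitFromSourceB);    // occupies position 1
+        long total = composite.getLength(); // sum of both child lengths
+
+      Note that add() throws IOException once the capacity given to the
+      constructor is exhausted, so the split must be sized up front. -->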
+ <!-- start class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <class name="CompositeRecordReader" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="CompositeRecordReader" type="int, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a RecordReader with <tt>capacity</tt> children to position
+ <tt>id</tt> in the parent reader.
+ The id of a root CompositeRecordReader is -1 by convention, but relying
+ on this is not recommended.]]>
+ </doc>
+ </constructor>
+ <method name="combine" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ </method>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordReaderQueue" return="java.util.PriorityQueue&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return sorted list of RecordReaders for this composite.]]>
+ </doc>
+ </method>
+ <method name="getComparator" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return comparator defining the ordering for RecordReaders in this
+ composite.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rr" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ? extends V&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a RecordReader to this collection.
+ The id() of a RecordReader determines where in the Tuple its
+ entry will appear. Adding RecordReaders with the same id has
+ undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key for the current join or the value at the top of the
+ RecordReader heap.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the top of this RR into the given object.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if it is possible that this could emit more values.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Pass skip key to child RRs.]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Obtain an iterator over the child RRs apropos of the value type
+ ultimately emitted from this join.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[If the key provided matches that of this Composite, give the
+ JoinCollector an iterator over the values it may emit.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For all child RRs offering the key provided, obtain an iterator
+ at that position in the JoinCollector.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key of join or head of heap
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new key value common to all child RRs.
+ @throws ClassCastException if key classes differ.]]>
+ </doc>
+ </method>
+ <method name="createInternalValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a value to be used internally for joins.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unsupported (returns zero in all cases).]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all child RRs.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report progress as the minimum of all child RR progress.]]>
+ </doc>
+ </method>
+ <field name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, X&gt;.JoinCollector"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="kids" type="org.apache.hadoop.mapred.join.ComposableRecordReader[]"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A RecordReader that can effect joins of RecordReaders sharing a common key
+ type and partitioning.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <class name="InnerJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Return true iff the tuple is full (all data sources contain this key).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full inner join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
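+ <!-- The combine() contract above ("true iff the tuple is full") admits a
+      very small implementation. This sketch is illustrative and not the
+      vendored source:
+
+        protected boolean combine(Object[] srcs, TupleWritable dst) {
+          for (int i = 0; i < dst.size(); ++i) {
+            if (!dst.has(i)) {
+              return false;  // some source lacked this key: no inner match
+            }
+          }
+          return true;       // every source contributed a value
+        }
+ -->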
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <class name="JoinRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, org.apache.hadoop.io.Writable, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Emit the next set of key, value pairs as defined by the child
+ RecordReaders and operation associated with this composite RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator wrapping the JoinCollector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite joins returning Tuples of arbitrary Writables.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <class name="JoinRecordReader.JoinDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader.JoinDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Since the JoinCollector is effecting our operation, we need only
+ provide an iterator proxy wrapping its operation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+ <class name="MultiFilterRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"/>
+ <constructor name="MultiFilterRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For each tuple emitted, return a value (typically one of the values
+ in the tuple).
+ Modifying the Writables in the tuple is permitted and unlikely to affect
+ join behavior in most cases, but it is not recommended. It's safer to
+ clone first.]]>
+ </doc>
+ </method>
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+        <![CDATA[The default implementation offers every Tuple from the
+ collector (the outer join of child RRs) to {@link #emit}.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator returning a single value from the tuple.
+ @see MultiFilterDelegationIterator]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite join returning values derived from multiple
+ sources, but generally not tuples.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <class name="MultiFilterRecordReader.MultiFilterDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"/>
+ <constructor name="MultiFilterRecordReader.MultiFilterDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy the JoinCollector, but include callback to emit.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <class name="OuterJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit everything from the collector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full outer join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.OverrideRecordReader -->
+ <class name="OverrideRecordReader" extends="org.apache.hadoop.mapred.join.MultiFilterRecordReader&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit the value with the highest position in the tuple.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instead of filling the JoinCollector with iterators from all
+ data sources, fill only the rightmost for this key.
+ This not only saves space by discarding the other sources, but
+ it also emits only as many key-value pairs as the preferred
+ RecordReader contains, instead of repeating that stream n times, where
+ n is the cardinality of the cross product of the discarded
+ streams for the given key.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Prefer the &quot;rightmost&quot; data source for this key.
+ For example, <tt>override(S1,S2,S3)</tt> will prefer values
+ from S3 over S2, and values from S2 over S1 for all keys
+ emitted from all sources.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OverrideRecordReader -->
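+ <!-- An override expression in the same compose() style as the earlier
+      sketch; the input format and paths are assumptions. Values from
+      "/data/c" win over "/data/b", which win over "/data/a":
+
+        job.set("mapred.join.expr", CompositeInputFormat.compose(
+            "override", SequenceFileInputFormat.class,
+            "/data/a", "/data/b", "/data/c"));
+ -->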
+ <!-- start class org.apache.hadoop.mapred.join.Parser -->
+ <class name="Parser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Very simple shift-reduce parser for join expressions.
+
+ This should be sufficient for the user extension permitted now, but ought to
+ be replaced with a parser generator if more complex grammars are supported.
+ In particular, this &quot;shift-reduce&quot; parser has no states. Each set
+ of formals requires a different internal node type, which is responsible for
+ interpreting the list of tokens it receives. This is sufficient for the
+ current grammar, but it has several annoying properties that might inhibit
+ extension. In particular, parentheses are always function calls; an
+ algebraic or filter grammar would not only require a node type, but must
+ also work around the internals of this parser.
+
+ For most other cases, adding classes to the hierarchy (particularly by
+ extending JoinRecordReader and MultiFilterRecordReader) is fairly
+ straightforward. One need only override the relevant method(s) (usually only
+ {@link CompositeRecordReader#combine}) and include a property to map its
+ value to an identifier in the parser.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser -->
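+ <!-- Registering a user-defined join type, as described in the
+      CompositeInputFormat and Parser documentation above. The class name
+      MyJoinRecordReader is hypothetical; it would have to be a
+      ComposableRecordReader with a constructor the parser can invoke:
+
+        job.set("mapred.join.define.myjoin",
+                MyJoinRecordReader.class.getName());
+        job.set("mapred.join.expr", CompositeInputFormat.compose(
+            "myjoin", SequenceFileInputFormat.class, "/data/a", "/data/b"));
+
+      The parser then treats "myjoin" as an identifier in the expression. -->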
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Node -->
+ <class name="Parser.Node" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat"/>
+ <constructor name="Parser.Node" type="java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addIdentifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="ident" type="java.lang.String"/>
+ <param name="mcstrSig" type="java.lang.Class[]"/>
+ <param name="nodetype" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.Parser.Node&gt;"/>
+ <param name="cl" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;"/>
+ <exception name="NoSuchMethodException" type="java.lang.NoSuchMethodException"/>
+ <doc>
+ <![CDATA[For a given identifier, add a mapping to the nodetype for the parse
+ tree and to the ComposableRecordReader to be created, including the
+ formals required to invoke the constructor.
+ The nodetype and constructor signature should be filled in from the
+ child node.]]>
+ </doc>
+ </method>
+ <method name="setID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="int"/>
+ </method>
+ <method name="setKeyComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"/>
+ </method>
+ <field name="rrCstrMap" type="java.util.Map&lt;java.lang.String, java.lang.reflect.Constructor&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;&gt;"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ident" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Node -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <class name="Parser.NodeToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <class name="Parser.NumToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.NumToken" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <class name="Parser.StrToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.StrToken" type="org.apache.hadoop.mapred.join.Parser.TType, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Token -->
+ <class name="Parser.Token" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getType" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Tagged-union type for tokens from the join expression.
+ @see Parser.TType]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Token -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.TType -->
+ <class name="Parser.TType" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.join.Parser.TType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.join.Parser.TType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.TType -->
+ <!-- start interface org.apache.hadoop.mapred.join.ResetableIterator -->
+ <interface name="ResetableIterator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True iff a call to next will succeed.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign next value to actual.
+ It is required that elements added to a ResetableIterator be returned in
+ the same order after a call to {@link #reset} (FIFO).
+
+ Note that a call to this may fail for nested joins (i.e. more elements
+ available, but none satisfying the constraints of the join).]]>
+ </doc>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign last value returned to actual.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set iterator to return to the start of its range. Must be called after
+ calling {@link #add} to avoid a ConcurrentModificationException.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an element to the collection of elements to iterate over.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close datasources and release resources. Calling methods on the iterator
+ after calling close has undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Close datasources, but do not release internal resources. Calling this
+ method should permit the object to be reused with a different datasource.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This defines an interface to a stateful Iterator that can replay elements
+ added to it directly.
+ Note that this does not extend {@link java.util.Iterator}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ResetableIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <class name="ResetableIterator.EMPTY" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;U&gt;"/>
+ <constructor name="ResetableIterator.EMPTY"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <!-- start class org.apache.hadoop.mapred.join.StreamBackedIterator -->
+ <class name="StreamBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="StreamBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+        <![CDATA[An implementation of ResetableIterator that uses a byte array
+ to store the elements added to it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.StreamBackedIterator -->
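+ <!-- A sketch of the add/reset/next contract documented on ResetableIterator,
+      using StreamBackedIterator as the concrete type; the element values are
+      assumptions:
+
+        StreamBackedIterator<Text> it = new StreamBackedIterator<Text>();
+        it.add(new Text("a"));
+        it.add(new Text("b"));
+        it.reset();                  // required after add()
+        Text val = new Text();
+        while (it.next(val)) {       // elements replay in FIFO order
+          System.out.println(val);
+        }
+        it.close();
+ -->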
+ <!-- start class org.apache.hadoop.mapred.join.TupleWritable -->
+ <class name="TupleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="TupleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty tuple with no allocated storage for writables.]]>
+ </doc>
+ </constructor>
+ <constructor name="TupleWritable" type="org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Initialize the tuple with backing storage; it is unknown
+ whether any of the elements contain &quot;written&quot; values.]]>
+ </doc>
+ </constructor>
+ <method name="has" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Return true if tuple has an element at the position provided.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get ith Writable from Tuple.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of children in this Tuple.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator over the elements in this tuple.
+ Note that this doesn't flatten the tuple; one may receive tuples
+ from this iterator.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert Tuple to String as in the following.
+ <tt>[<child1>,<child2>,...,<childn>]</tt>]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes each Writable to <code>out</code>.
+ TupleWritable format:
+ {@code
+ <count><type1><type2>...<typen><obj1><obj2>...<objn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.TupleWritable -->
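+ <!-- A sketch of inspecting a joined value: has(i) distinguishes positions
+      that were written from empty slots. The method names come from the
+      class description above; the printing is illustrative:
+
+        void dump(TupleWritable tuple) {
+          for (int i = 0; i < tuple.size(); ++i) {
+            if (tuple.has(i)) {      // source i supplied a value for this key
+              System.out.println(i + " => " + tuple.get(i));
+            }
+          }
+        }
+ -->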
+ <!-- start class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+ <class name="WrappedRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, U&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key at the head of this RR.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RR into the object supplied.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Return true if the RR (including the k,v pair stored in this
+ object) is not yet exhausted.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Read the next k,v pair into the head of this object; return
+ true iff the RR and this are not yet exhausted.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an iterator to the collector at the position occupied by this
+ RecordReader over the values in this stream paired with the key
+ provided (i.e., register a stream of values from this source matching K
+ with a collector).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write key-value pair at the head of this stream to the objects provided;
+ get next key-value pair from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new key from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="U extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new value from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request progress from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request position from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Forward close request to proxied RR.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key at head of proxied RR
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+        <![CDATA[Return true iff compareTo(other) returns 0.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy class for a RecordReader participating in the join framework.
+ This class keeps track of the &quot;head&quot; key-value pair for the
+ provided RecordReader and keeps a store of values matching a key when
+ this source is participating in a join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+</package>
+<package name="org.apache.hadoop.mapred.lib">
+ <!-- start class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
+ <class name="FieldSelectionMapReduce" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="FieldSelectionMapReduce"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[The identity function. The input key/value pair is written directly to the output.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements a mapper/reducer class that can be used to perform
+ field selections in a manner similar to Unix cut. The input data is treated
+ as fields separated by a user-specified separator (the default value is
+ "\t"). The user can specify a list of fields that form the map output keys,
+ and a list of fields that form the map output values. If the input format is
+ TextInputFormat, the mapper ignores the key passed to the map function and
+ takes the fields from the value only. Otherwise, the fields are the union of
+ those from the key and those from the value.
+
+ The field separator is under attribute "mapred.data.field.separator".
+
+ The map output field list spec is under attribute "map.output.key.value.fields.spec".
+ The value is expected to be of the form "keyFieldsSpec:valueFieldsSpec".
+ keyFieldsSpec and valueFieldsSpec are comma (,) separated lists of field specs:
+ fieldSpec,fieldSpec,fieldSpec ... Each field spec can be a simple number
+ (e.g. 5) specifying a specific field, a range (like 2-5) specifying a range of
+ fields, or an open range (like 3-) specifying all the fields starting from
+ field 3. Open range field specs apply to value fields only; they have no
+ effect on the key fields.
+
+ Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies fields 4,3,0 and 1 for
+ the keys, and fields 6,5,1,2,3,7 and above for the values.
+
+ The reduce output field list spec is under attribute "reduce.output.key.value.fields.spec".
+
+ The reducer extracts output key/value pairs in a similar manner, except that
+ the key is never ignored.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
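+ <!-- Editor's sketch: a minimal JobConf setup for FieldSelectionMapReduce using
+      the attributes documented above. The reduce spec value "0,1:2-" and the use
+      of the same class as both mapper and reducer are illustrative assumptions.
+
+      // imports assumed: org.apache.hadoop.mapred.JobConf,
+      //                  org.apache.hadoop.mapred.lib.FieldSelectionMapReduce
+      JobConf job = new JobConf(FieldSelectionMapReduce.class);
+      // Fields are split on this separator (the documented default is "\t").
+      job.set("mapred.data.field.separator", "\t");
+      // Map output: fields 4,3,0,1 form the key; fields 6,5,1-3 and 7- the value.
+      job.set("map.output.key.value.fields.spec", "4,3,0,1:6,5,1-3,7-");
+      // Reduce output spec uses the same "keyFieldsSpec:valueFieldsSpec" syntax.
+      job.set("reduce.output.key.value.fields.spec", "0,1:2-");
+      job.setMapperClass(FieldSelectionMapReduce.class);
+      job.setReducerClass(FieldSelectionMapReduce.class);
+ -->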
+ <!-- start class org.apache.hadoop.mapred.lib.HashPartitioner -->
+ <class name="HashPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="HashPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partition keys by their {@link Object#hashCode()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.HashPartitioner -->
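+ <!-- Editor's sketch of the documented contract (partition by Object#hashCode());
+      the sign-bit masking shown is the conventional idiom for keeping the result
+      non-negative and is an assumption about the exact implementation.
+
+      public int getPartition(K2 key, V2 value, int numReduceTasks) {
+        // Mask the sign bit so negative hash codes still map to a valid partition.
+        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
+      }
+ -->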
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <class name="IdentityMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The identity function. Input key/value pair is written directly to
+ output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implements the identity function, mapping inputs directly to outputs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <class name="IdentityReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;V&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes all keys and values directly to output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Performs no reduction, writing all input values directly to the output.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <class name="InverseMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, V, K&gt;"/>
+ <constructor name="InverseMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;V, K&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The inverse function. Input keys and values are swapped.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that swaps keys and values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+ <class name="KeyFieldBasedPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="KeyFieldBasedPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+ <!-- start class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <class name="LongSumReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, org.apache.hadoop.io.LongWritable, K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="LongSumReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Reducer} that sums long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.LongSumReducer -->
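+ <!-- Editor's sketch of the reduce step implied by "sums long values";
+      behaviorally equivalent to, not necessarily identical with, the shipped
+      implementation.
+
+      // imports assumed: java.io.IOException, java.util.Iterator,
+      //                  org.apache.hadoop.io.LongWritable, org.apache.hadoop.mapred.*
+      public void reduce(K key, Iterator<LongWritable> values,
+                         OutputCollector<K, LongWritable> output,
+                         Reporter reporter) throws IOException {
+        long sum = 0;
+        while (values.hasNext()) {
+          sum += values.next().get();  // accumulate every value seen for this key
+        }
+        output.collect(key, new LongWritable(sum));
+      }
+ -->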
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+ <class name="MultipleOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a composite record writer that can write key/value data to different
+ output files.
+
+ @param fs
+ the file system to use
+ @param job
+ the job conf for the job
+ @param name
+ the leaf file name for the output file (such as "part-00000")
+ @param arg3
+ a progressable for reporting progress.
+ @return a composite record writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="generateLeafFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the leaf name for the output file name. The default behavior does
+ not change the leaf file name (such as part-00000).
+
+ @param name
+ the leaf file name for the output file
+ @return the given leaf file name]]>
+ </doc>
+ </method>
+ <method name="generateFileNameForKeyValue" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the output file name based on the given key and the leaf file
+ name. The default behavior is that the file name does not depend on the
+ key.
+
+ @param key
+ the key of the output data
+ @param name
+ the leaf file name
+ @return generated file name]]>
+ </doc>
+ </method>
+ <method name="generateActualKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <doc>
+ <![CDATA[Generate the actual key from the given key/value. The default behavior is that
+ the actual key is equal to the given key.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual key derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="generateActualValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <doc>
+ <![CDATA[Generate the actual value from the given key and value. The default behavior is that
+ the actual value is equal to the given value.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual value derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="getInputFileBasedOutputFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the output file name based on a given name and the input file name.
+ If the map input file does not exist (i.e. this is not a map-only job),
+ the given name is returned unchanged. If the config value for
+ "num.of.trailing.legs.to.use" is not set, or is set to 0 or a negative value,
+ the given name is returned unchanged. Otherwise, return a file name consisting
+ of the N trailing legs of the input file name, where N is the config value for
+ "num.of.trailing.legs.to.use".
+
+ @param job
+ the job config
+ @param name
+ the output file name
+ @return the output file name based on the given name and the input file name.]]>
+ </doc>
+ </method>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param fs
+ the file system to use
+ @param job
+ a job conf object
+ @param name
+ the name of the file over which a record writer object will be
+ constructed
+ @param arg3
+ a progressable object
+ @return A RecordWriter object over the given file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This abstract class extends the OutputFormatBase, allowing the output
+ data to be written to different output files. There are three basic use cases
+ for this class.
+
+ Case one: This class is used for a map/reduce job with at least one reducer.
+ The reducer wants to write data to different files depending on the actual
+ keys. It is assumed that a key (or value) encodes the actual key (value)
+ and the desired location for the actual key (value).
+
+ Case two: This class is used for a map-only job. The job wants to use an
+ output file name that is either a part of the input file name of the input
+ data, or some derivation of it.
+
+ Case three: This class is used for a map-only job. The job wants to use an
+ output file name that depends on both the keys and the input file name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
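+ <!-- Editor's sketch for "case one" above: route each record to a file named
+      after its key by subclassing the concrete MultipleTextOutputFormat described
+      below. The class name and the Text/Text type choice are illustrative.
+
+      // imports assumed: org.apache.hadoop.io.Text,
+      //                  org.apache.hadoop.mapred.lib.MultipleTextOutputFormat
+      public class KeyBasedTextOutputFormat
+          extends MultipleTextOutputFormat<Text, Text> {
+        @Override
+        protected String generateFileNameForKeyValue(Text key, Text value,
+                                                     String name) {
+          // e.g. key "us" turns leaf name part-00000 into us/part-00000
+          return key.toString() + "/" + name;
+        }
+      }
+ -->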
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <class name="MultipleSequenceFileOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleSequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends the MultipleOutputFormat, allowing the output data
+ to be written to different output files in sequence file output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <class name="MultipleTextOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleTextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends the MultipleOutputFormat, allowing the output data
+ to be written to different output files in Text output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
+ <class name="MultithreadedMapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MultithreadedMapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Multithreaded implementation of {@link org.apache.hadoop.mapred.MapRunnable}.
+ <p>
+ It can be used instead of the default implementation,
+ {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not
+ CPU bound, in order to improve throughput.
+ <p>
+ Map implementations using this MapRunnable must be thread-safe.
+ <p>
+ The Map-Reduce job has to be configured to use this MapRunnable class (using
+ the JobConf.setMapRunnerClass method) and the number of threads the
+ thread pool can use with the
+ <code>mapred.map.multithreadedrunner.threads</code> property; its default
+ value is 10 threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
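+ <!-- Editor's sketch of the configuration described above; MyJob and MyMapper
+      are illustrative placeholders, and the mapper must be thread-safe.
+
+      // imports assumed: org.apache.hadoop.mapred.JobConf,
+      //                  org.apache.hadoop.mapred.lib.MultithreadedMapRunner
+      JobConf job = new JobConf(MyJob.class);
+      job.setMapperClass(MyMapper.class);  // must be thread-safe (see above)
+      job.setMapRunnerClass(MultithreadedMapRunner.class);
+      // Size of the runner's thread pool; the documented default is 10.
+      job.setInt("mapred.map.multithreadedrunner.threads", 20);
+ -->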
+ <!-- start class org.apache.hadoop.mapred.lib.NLineInputFormat -->
+ <class name="NLineInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="NLineInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically splits the set of input files for the job, treating N lines
+ of the input as one split.
+
+ @see org.apache.hadoop.mapred.FileInputFormat#getSplits(JobConf, int)]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[NLineInputFormat which splits N lines of input as one split.
+
+ In many "pleasantly" parallel applications, each process/mapper
+ processes the same input file(s), but with computations
+ controlled by different parameters (referred to as "parameter sweeps").
+ One way to achieve this is to specify a set of parameters
+ (one set per line) as input in a control file
+ (which is the input path to the map-reduce application,
+ whereas the input dataset is specified
+ via a config variable in JobConf).
+
+ The NLineInputFormat can be used in such applications; it splits
+ the input file such that, by default, one line is fed as
+ a value to one map task, and the key is the offset;
+ i.e. (k,v) is (LongWritable, Text).
+ The location hints will span the whole mapred cluster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NLineInputFormat -->
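+ <!-- Editor's sketch of the parameter-sweep setup described above; the control
+      file path and the config variable name "sweep.data.input" are illustrative
+      assumptions.
+
+      // imports assumed: org.apache.hadoop.fs.Path, org.apache.hadoop.mapred.*
+      JobConf job = new JobConf(SweepJob.class);
+      job.setInputFormat(NLineInputFormat.class);
+      // The control file holds one parameter set per line; by default each line
+      // becomes the value for one map task, keyed by its byte offset.
+      FileInputFormat.setInputPaths(job, new Path("/sweeps/params.txt"));
+      // The real dataset is passed out of band via a config variable.
+      job.set("sweep.data.input", "/data/bigdataset");
+ -->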
+ <!-- start class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <class name="NullOutputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="NullOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[Consume all outputs and put them in /dev/null.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <class name="RegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="RegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+ <class name="TokenCountMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="TokenCountMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that maps text values into <token,freq> pairs. Uses
+ {@link StringTokenizer} to break text into tokens.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.TokenCountMapper -->
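+ <!-- Editor's sketch of the map step implied by the description above
+      (StringTokenizer over the value, emitting <token, 1> pairs); behaviorally
+      equivalent, not necessarily line-for-line identical.
+
+      // imports assumed: java.io.IOException, java.util.StringTokenizer,
+      //                  org.apache.hadoop.io.*, org.apache.hadoop.mapred.*
+      public void map(K key, Text value,
+                      OutputCollector<Text, LongWritable> output,
+                      Reporter reporter) throws IOException {
+        StringTokenizer st = new StringTokenizer(value.toString());
+        while (st.hasMoreTokens()) {
+          // Emit each token with count 1; a LongSumReducer can total the counts.
+          output.collect(new Text(st.nextToken()), new LongWritable(1));
+        }
+      }
+ -->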
+</package>
+<package name="org.apache.hadoop.mapred.lib.aggregate">
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+ <class name="DoubleValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="DoubleValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a double value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="double"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a double value.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getSum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up a sequence of double
+ values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
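+ <!-- Editor's sketch of the aggregator protocol (addNextValue / getSum /
+      getReport / reset) using only the methods listed above; the values are
+      illustrative.
+
+      DoubleValueSum sum = new DoubleValueSum();
+      sum.addNextValue(1.5);
+      sum.addNextValue("2.5");          // string form is parsed as a double
+      double total = sum.getSum();      // 4.0
+      String report = sum.getReport();  // string form of the aggregated value
+      sum.reset();                      // clear the sum for the next key
+ -->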
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <class name="LongValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the maximum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <class name="LongValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the minimum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <class name="LongValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getSum" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <class name="StringValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the biggest of
+ a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <class name="StringValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the smallest of
+ a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+ <class name="UniqValueCount" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="UniqValueCount"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UniqValueCount" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.
+ @param maxNum the limit on the number of unique values to keep.]]>
+ </doc>
+ </constructor>
+ <method name="setMaxItems" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <doc>
+ <![CDATA[Set the limit on the number of unique values
+ @param n the desired limit on the number of unique values
+ @return the new limit on the number of unique values]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the number of unique objects aggregated]]>
+ </doc>
+ </method>
+ <method name="getUniqueItems" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the set of the unique objects]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of the unique objects. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that dedupes a sequence of objects.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
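+ <!-- Editor's sketch of the dedup aggregator's protocol; the values and the
+      limit of 1000 are illustrative.
+
+      UniqValueCount uniq = new UniqValueCount();
+      uniq.setMaxItems(1000);           // cap the number of distinct values kept
+      uniq.addNextValue("a");
+      uniq.addNextValue("b");
+      uniq.addNextValue("a");           // duplicate, so not counted again
+      String count = uniq.getReport();  // number of unique objects, here "2"
+      java.util.Set items = uniq.getUniqueItems();  // the unique objects themselves
+ -->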
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+ <class name="UserDefinedValueAggregatorDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="UserDefinedValueAggregatorDescriptor" type="java.lang.String, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param className the class name of the user-defined descriptor class
+ @param job a configuration object used for descriptor configuration]]>
+ </doc>
+ </constructor>
+ <method name="createInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="className" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create an instance of the given class
+ @param className the name of the class
+ @return a dynamically created instance of the given class]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pair
+ by delegating the invocation to the real object.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a wrapper for a user-defined value aggregator descriptor.
+ It serves two functions: one is to create an object of ValueAggregatorDescriptor from the
+ name of a user-defined class that may be dynamically loaded. The other is to
+ delegate invocations of the generateKeyValPairs function to the created object.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
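+ <!-- Editor's sketch of constructing the wrapper; "com.example.MyDescriptor" is
+      an illustrative placeholder for a user-defined descriptor class name.
+
+      JobConf job = new JobConf();
+      ValueAggregatorDescriptor d =
+          new UserDefinedValueAggregatorDescriptor("com.example.MyDescriptor", job);
+      // Calls to d.generateKeyValPairs(key, val) are delegated to a dynamically
+      // loaded instance of com.example.MyDescriptor.
+ -->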
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+ <interface name="ValueAggregator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val the value to be added]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of values as the outputs of the combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface defines the minimal protocol for value aggregators.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+ <class name="ValueAggregatorBaseDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="ValueAggregatorBaseDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="generateEntry" return="java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <param name="id" type="java.lang.String"/>
+ <param name="val" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @param id the aggregation id
+ @param val the val associated with the id to be aggregated
+ @return an Entry whose key is the aggregation id prefixed with
+ the aggregation type.]]>
+ </doc>
+ </method>
+ <method name="generateValueAggregator" return="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @return a value aggregator of the given type.]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate 1 or 2 aggregation-id/value pairs for the given key/value pair.
+ The first id will be of type LONG_VALUE_SUM, with "record_count" as
+ its aggregation id. If the input is a file split,
+ the second id of the same type will be generated too, with the file name
+ as its aggregation id. This achieves the behavior of counting the total number
+ of records in the input data, and the number of records in each input file.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[get the input file name.
+
+ @param job a job configuration object]]>
+ </doc>
+ </method>
+ <field name="UNIQ_VALUE_COUNT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VALUE_HISTOGRAM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputFile" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements the common functionality of
+ the subclasses of the ValueAggregatorDescriptor class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
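+ <!-- Editor's sketch of composing an aggregation-id/value pair with the static
+      helpers above; the id "record_count" follows the class description, and the
+      exact rendering of the prefixed key is an assumption.
+
+      // imports assumed: java.util.Map, org.apache.hadoop.io.Text
+      // Key is the aggregation id prefixed with its type, so a matching
+      // aggregator can be chosen on the combiner/reducer side.
+      Map.Entry<Text, Text> e = ValueAggregatorBaseDescriptor.generateEntry(
+          ValueAggregatorBaseDescriptor.LONG_VALUE_SUM, "record_count",
+          new Text("1"));
+      ValueAggregator agg = ValueAggregatorBaseDescriptor.generateValueAggregator(
+          ValueAggregatorBaseDescriptor.LONG_VALUE_SUM);
+ -->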
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <class name="ValueAggregatorCombiner" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorCombiner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[The combiner does not need any configuration.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Combines values for a given key.
+ @param key the key, expected to be a Text object whose prefix indicates
+ the type of aggregation to apply to the values.
+ @param values the values to combine
+ @param output to collect combined values]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic combiner of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+ <interface name="ValueAggregatorDescriptor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pair.
+ This function is usually called by the mapper of an Aggregate based job.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type, which is used to guide the way the value is aggregated
+ in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configure the object.
+
+ @param job
+ a JobConf object that may contain the information that can be used
+ to configure the object.]]>
+ </doc>
+ </method>
+ <field name="TYPE_SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ONE" type="org.apache.hadoop.io.Text"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This interface defines the contract a value aggregator descriptor must
+ support. Such a descriptor can be configured with a JobConf object. Its main
+ function is to generate a list of aggregation-id/value pairs. An aggregation
+ id encodes an aggregation type, which is used to guide the way the value is
+ aggregated in the reduce/combiner phase of an Aggregate based job. The mapper
+ in an Aggregate based map/reduce job may create one or more
+ ValueAggregatorDescriptor objects at configuration time. For each input
+ key/value pair, the mapper will use those objects to create aggregation
+ id/value pairs.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
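+ <!-- Illustrative sketch (not generated by JDiff): a minimal descriptor that counts
+      words, implementing the interface above. The class name WordCountDescriptor and
+      the aggregation type string "LongValueSum" are assumptions made for illustration;
+      TYPE_SEPARATOR and ONE are the interface fields documented above.
+
+      import java.util.AbstractMap.SimpleEntry;
+      import java.util.ArrayList;
+      import java.util.Map.Entry;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor;
+
+      public class WordCountDescriptor implements ValueAggregatorDescriptor {
+        public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
+          ArrayList<Entry<Text, Text>> out = new ArrayList<Entry<Text, Text>>();
+          for (String word : val.toString().split("\\s+")) {
+            // output key = <aggregation type> + TYPE_SEPARATOR + <aggregation id>
+            Text aggId = new Text("LongValueSum" + TYPE_SEPARATOR + word);
+            out.add(new SimpleEntry<Text, Text>(aggId, ONE));
+          }
+          return out;
+        }
+        public void configure(JobConf job) {
+          // nothing to configure in this sketch
+        }
+      }
+ -->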
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+ <class name="ValueAggregatorJob" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorJob"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation. Generic Hadoop
+ arguments are accepted.
+ @return a JobConf object ready for submission.
+
+ @throws IOException
+ @see GenericOptionsParser]]>
+ </doc>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setAggregatorDescriptors"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and run an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the main class for creating a map/reduce job using the Aggregate
+ framework. Aggregate is a specialization of the map/reduce framework for
+ performing various simple aggregations.
+
+ Generally speaking, in order to implement an application using the Map/Reduce
+ model, the developer implements Map and Reduce functions (and possibly a
+ combine function). However, many applications related to counting and
+ computing statistics have very similar characteristics. Aggregate abstracts
+ out the general patterns of these functions and implements those patterns.
+ In particular, the package provides generic mapper/reducer/combiner classes,
+ a set of built-in value aggregators, and a generic utility class that
+ helps users create map/reduce jobs using the generic classes. The built-in
+ aggregators can:
+
+ - sum numeric values
+ - count the number of distinct values
+ - compute the histogram of values
+ - compute the minimum, maximum, median, average, and standard
+   deviation of numeric values
+
+ The developer using Aggregate need only provide a plugin class
+ conforming to the following interface:
+
+ public interface ValueAggregatorDescriptor {
+   public ArrayList<Entry> generateKeyValPairs(Object key, Object value);
+   public void configure(JobConf job);
+ }
+
+ The package also provides a base class, ValueAggregatorBaseDescriptor,
+ implementing the above interface. The user can extend the base class and
+ implement generateKeyValPairs accordingly.
+
+ The primary work of generateKeyValPairs is to emit one or more key/value
+ pairs based on the input key/value pair. The key in an output key/value pair
+ encodes two pieces of information: the aggregation type and the aggregation
+ id. The value is aggregated onto the aggregation id according to the
+ aggregation type.
+
+ This class offers a function to generate a map/reduce job using the Aggregate
+ framework. The function takes the following parameters:
+
+ - input directory spec
+ - input format (text or sequence file)
+ - output directory
+ - a file specifying the user plugin class]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
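+ <!-- Usage sketch (the driver class name RunAggregate is hypothetical; the argument
+      layout is whatever createValueAggregatorJob's parser expects, as described above).
+
+      import org.apache.hadoop.mapred.JobClient;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob;
+
+      public class RunAggregate {
+        public static void main(String[] args) throws Exception {
+          // builds a fully configured Aggregate job from the command line arguments
+          JobConf job = ValueAggregatorJob.createValueAggregatorJob(args);
+          JobClient.runJob(job); // submit and wait for completion
+        }
+      }
+ -->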
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <class name="ValueAggregatorJobBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K1, V1, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="ValueAggregatorJobBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="logSpec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="aggregatorDescriptorList" type="java.util.ArrayList&lt;org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This abstract class implements some common functionality of
+ the generic mapper, reducer and combiner classes of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <class name="ValueAggregatorMapper" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The map function. It iterates through the value aggregator descriptor
+ list to generate aggregation id/value pairs and emits them.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="org.apache.hadoop.io.Text"/>
+ <param name="arg1" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic mapper of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <class name="ValueAggregatorReducer" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param key
+ the key is expected to be a Text object whose prefix indicates
+ the type of aggregation to apply to the values. In effect, data
+ driven computing is achieved. It is assumed that each aggregator's
+ getReport method emits appropriate output for the aggregator. This
+ may be further customized.
+ @param values the values to be aggregated]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic reducer of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+ <class name="ValueHistogram" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="ValueHistogram"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Add the given val to the aggregator.
+
+ @param val the value to be added. It is expected to be a string
+ in the form of xxxx\tnum, meaning xxxx has num occurrences.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this aggregator.
+ It includes the following basic statistics of the histogram:
+ the number of unique values
+ the minimum value
+ the median value
+ the maximum value
+ the average value
+ the standard deviation]]>
+ </doc>
+ </method>
+ <method name="getReportDetails" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a string representation of the list of value/frequency pairs of
+ the histogram]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a list of value/frequency pairs.
+ The return value is expected to be used by the reducer.]]>
+ </doc>
+ </method>
+ <method name="getReportItems" return="java.util.TreeMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a TreeMap representation of the histogram]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the aggregator.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that computes the
+ histogram of a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
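+ <!-- Usage sketch for ValueHistogram outside a job (illustrative; the input strings
+      follow the "xxxx\tnum" contract documented in addNextValue above).
+
+      import org.apache.hadoop.mapred.lib.aggregate.ValueHistogram;
+
+      public class HistogramDemo {
+        public static void main(String[] args) {
+          ValueHistogram hist = new ValueHistogram();
+          hist.addNextValue("apple\t3");  // "apple" occurred 3 times
+          hist.addNextValue("pear\t1");
+          System.out.println(hist.getReport());        // summary statistics
+          System.out.println(hist.getReportDetails()); // value/frequency pairs
+          hist.reset();
+        }
+      }
+ -->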
+</package>
+<package name="org.apache.hadoop.mapred.pipes">
+ <!-- start class org.apache.hadoop.mapred.pipes.Submitter -->
+ <class name="Submitter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Submitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExecutable" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the URI of the application's executable.
+ @param conf the job's configuration
+ @return the URI where the application's executable is located]]>
+ </doc>
+ </method>
+ <method name="setExecutable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="executable" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the URI for the application's executable. Normally this is an hdfs:
+ location.
+ @param conf the job's configuration
+ @param executable The URI of the application's executable.]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job is using a Java RecordReader.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordReader" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java RecordReader
+ @param conf the configuration to check
+ @return is it a Java RecordReader?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Mapper is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaMapper" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Mapper.
+ @param conf the configuration to check
+ @return is it a Java Mapper?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaReducer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Reducer is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaReducer" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Reducer.
+ @param conf the configuration to check
+ @return is it a Java Reducer?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job will use a Java RecordWriter.
+ @param conf the configuration to modify
+ @param value the new value to set]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordWriter" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Will the reduce use a Java RecordWriter?
+ @param conf the configuration to check
+ @return true, if the output of the job will be written by Java]]>
+ </doc>
+ </method>
+ <method name="getKeepCommandFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Does the user want to keep the command file for debugging? If this is
+ true, pipes will write a copy of the command data to a file in the
+ task directory named "downlink.data", which may be used to run the C++
+ program under the debugger. You probably also want to set
+ JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
+ being deleted.
+ To run using the data file, set the environment variable
+ "hadoop.pipes.command.file" to point to the file.
+ @param conf the configuration to check
+ @return will the framework save the command file?]]>
+ </doc>
+ </method>
+ <method name="setKeepCommandFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether to keep the command file for debugging
+ @param conf the configuration to modify
+ @param keep the new value]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Submit a pipes job based on the command line arguments.
+ @param args the command line arguments]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The main entry point and job submitter. It may be used either from the
+ command line or through the API to launch Pipes jobs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.pipes.Submitter -->
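+ <!-- Configuration sketch for a Pipes job, using only the setters documented above.
+      The executable URI is a placeholder path.
+
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.RunningJob;
+      import org.apache.hadoop.mapred.pipes.Submitter;
+
+      public class PipesDriver {
+        public static void main(String[] args) throws Exception {
+          JobConf conf = new JobConf();
+          Submitter.setExecutable(conf, "hdfs://namenode/apps/wordcount-pipes");
+          Submitter.setIsJavaRecordReader(conf, true);  // read input with Java code
+          Submitter.setIsJavaRecordWriter(conf, true);  // write output with Java code
+          RunningJob job = Submitter.submitJob(conf);   // conf is modified for pipes
+        }
+      }
+ -->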
+</package>
+<package name="org.apache.hadoop.metrics">
+ <!-- start class org.apache.hadoop.metrics.ContextFactory -->
+ <class name="ContextFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ContextFactory"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of ContextFactory]]>
+ </doc>
+ </constructor>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the named attribute, or null if there is no
+ attribute of that name.
+
+ @param attributeName the attribute name
+ @return the attribute value]]>
+ </doc>
+ </method>
+ <method name="getAttributeNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all the factory's attributes.
+
+ @return the attribute names]]>
+ </doc>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Sets the named factory attribute to the specified value, creating it
+ if it did not already exist. If the value is null, this is the same as
+ calling removeAttribute.
+
+ @param attributeName the attribute name
+ @param value the new attribute value]]>
+ </doc>
+ </method>
+ <method name="removeAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes the named attribute if it exists.
+
+ @param attributeName the attribute name]]>
+ </doc>
+ </method>
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <exception name="InstantiationException" type="java.lang.InstantiationException"/>
+ <exception name="IllegalAccessException" type="java.lang.IllegalAccessException"/>
+ <doc>
+ <![CDATA[Returns the named MetricsContext instance, constructing it if necessary
+ using the factory's current configuration attributes. <p/>
+
+ When constructing the instance, if the factory property
+ <code><i>contextName</i>.class</code> exists,
+ its value is taken to be the name of the class to instantiate. Otherwise,
+ the default is to create an instance of
+ <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a
+ dummy "no-op" context which will cause all metric data to be discarded.
+
+ @param contextName the name of the context
+ @return the named MetricsContext]]>
+ </doc>
+ </method>
+ <method name="getNullContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a "null" context - one which does nothing.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the singleton ContextFactory instance, constructing it if
+ necessary. <p/>
+
+ When the instance is constructed, this method checks if the file
+ <code>hadoop-metrics.properties</code> exists on the class path. If it
+ exists, it must be in the format defined by java.util.Properties, and all
+ the properties in the file are set as attributes on the newly created
+ ContextFactory instance.
+
+ @return the singleton ContextFactory instance]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factory class for creating MetricsContext objects. To obtain an instance
+ of this class, use the static <code>getFactory()</code> method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ContextFactory -->
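+ <!-- Usage sketch: obtaining a MetricsContext through the factory. The context name
+      "myContext" and the attribute values are placeholders; the contextName.class
+      convention is the one documented in getContext above.
+
+      import org.apache.hadoop.metrics.ContextFactory;
+      import org.apache.hadoop.metrics.MetricsContext;
+
+      public class FactoryDemo {
+        public static void main(String[] args) throws Exception {
+          ContextFactory factory = ContextFactory.getFactory();
+          factory.setAttribute("myContext.class",
+              "org.apache.hadoop.metrics.file.FileContext");
+          factory.setAttribute("myContext.fileName", "/tmp/metrics.log");
+          MetricsContext context = factory.getContext("myContext");
+        }
+      }
+ -->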
+ <!-- start interface org.apache.hadoop.metrics.MetricsContext -->
+ <interface name="MetricsContext" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.
+
+ @return the context name]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, the emitting of metrics records as they are
+ updated.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free any data that the implementation
+ may have buffered for sending at the next timer event. It
+ is OK to call <code>startMonitoring()</code> again after calling
+ this.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and also frees any buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new MetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at regular time intervals, as
+ determined by the implementation-class specific configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records and then return]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_PERIOD" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default period in seconds at which data is sent to the metrics system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The main interface to the metrics package.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsContext -->
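+ <!-- Sketch of the typical lifecycle: create a record, register an Updater and start
+      monitoring. The class, record and metric names here are hypothetical.
+
+      import java.io.IOException;
+      import org.apache.hadoop.metrics.MetricsContext;
+      import org.apache.hadoop.metrics.MetricsRecord;
+      import org.apache.hadoop.metrics.Updater;
+
+      public class QueueDepthReporter implements Updater {
+        private final MetricsRecord record;
+
+        public QueueDepthReporter(MetricsContext context) throws IOException {
+          record = context.createRecord("queueStats");
+          context.registerUpdater(this);   // doUpdates runs on the context's timer
+          context.startMonitoring();
+        }
+
+        public void doUpdates(MetricsContext context) {
+          record.setMetric("queueDepth", currentDepth());
+          record.update();                 // buffered until the next timer event
+        }
+
+        private int currentDepth() { return 0; /* placeholder */ }
+      }
+ -->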
+ <!-- start class org.apache.hadoop.metrics.MetricsException -->
+ <class name="MetricsException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException
+
+ @param message an error message]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[General-purpose, unchecked metrics exception.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsException -->
+ <!-- start interface org.apache.hadoop.metrics.MetricsRecord -->
+ <interface name="MetricsRecord" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value. The tagValue may be null,
+ which is treated the same as an empty String.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.
+
+ @param tagName name of a tag]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes, from the buffered data table, all rows having tags
+ that equal the tags that have been set on this record. For example,
+ if there are no tags on this record, all rows for this record name
+ would be removed. Or, if there is a single tag on this record, then
+ just rows containing a tag with the same name and value would be removed.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A named and optionally tagged set of records to be sent to the metrics
+ system. <p/>
+
+ A record name identifies the kind of data to be reported. For example, a
+ program reporting statistics relating to the disks on a computer might use
+ a record name "diskStats".<p/>
+
+ A record has zero or more <i>tags</i>. A tag has a name and a value. To
+ continue the example, the "diskStats" record might use a tag named
+ "diskName" to identify a particular disk. Sometimes it is useful to have
+ more than one tag, so there might also be a "diskType" with value "ide" or
+ "scsi" or whatever.<p/>
+
+ A record also has zero or more <i>metrics</i>. These are the named
+ values that are to be reported to the metrics system. In the "diskStats"
+ example, possible metric names would be "diskPercentFull", "diskPercentBusy",
+ "kbReadPerSecond", etc.<p/>
+
+ The general procedure for using a MetricsRecord is to fill in its tag and
+ metric values, and then call <code>update()</code> to pass the record to the
+ client library.
+ Metric data is not immediately sent to the metrics system
+ each time that <code>update()</code> is called.
+ An internal table is maintained, identified by the record name. This
+ table has columns
+ corresponding to the tag and the metric names, and rows
+ corresponding to each unique set of tag values. An update
+ either modifies an existing row in the table, or adds a new row with a set of
+ tag values that are different from all the other rows. Note that if there
+ are no tags, then there can be at most one row in the table. <p/>
+
+ Once a row is added to the table, its data will be sent to the metrics system
+ on every timer period, whether or not it has been updated since the previous
+ timer period. If this is inappropriate, for example if metrics were being
+ reported by some transient object in an application, the <code>remove()</code>
+ method can be used to remove the row and thus stop the data from being
+ sent.<p/>
+
+ Note that the <code>update()</code> method is atomic. This means that it is
+ safe for different threads to be updating the same metric. More precisely,
+ it is OK for different threads to call <code>update()</code> on MetricsRecord instances
+ with the same set of tag names and tag values. Different threads should
+ <b>not</b> use the same MetricsRecord instance at the same time.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsRecord -->
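+ <!-- A worked version of the "diskStats" example from the description above
+      (illustrative; tag and metric values are placeholders).
+
+      import org.apache.hadoop.metrics.MetricsContext;
+      import org.apache.hadoop.metrics.MetricsRecord;
+
+      public class DiskStatsDemo {
+        static void report(MetricsContext context) {
+          MetricsRecord diskStats = context.createRecord("diskStats");
+          diskStats.setTag("diskName", "sda1");        // identifies the row
+          diskStats.setMetric("diskPercentFull", 72);
+          diskStats.setMetric("kbReadPerSecond", 340);
+          diskStats.update(); // updates (or adds) the buffered row for diskName=sda1
+        }
+      }
+ -->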
+ <!-- start class org.apache.hadoop.metrics.MetricsUtil -->
+ <class name="MetricsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to return the named context.
+ If the desired context cannot be created for any reason, the exception
+ is logged, and a null context is returned.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to create and return a new metrics record instance within the
+ given context. This record is tagged with the host name.
+
+ @param context the context
+ @param recordName name of the record
+ @return newly created metrics record]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility class to simplify creation and reporting of Hadoop metrics.
+
+ For examples of usage, see {@link org.apache.hadoop.dfs.DataNode}.
+ @see org.apache.hadoop.metrics.MetricsRecord
+ @see org.apache.hadoop.metrics.MetricsContext
+ @see org.apache.hadoop.metrics.ContextFactory]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsUtil -->
+ <!-- start interface org.apache.hadoop.metrics.Updater -->
+ <interface name="Updater" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Timer-based call-back from the metrics library.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Call-back interface. See <code>MetricsContext.registerUpdater()</code>.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.Updater -->
+</package>
+<package name="org.apache.hadoop.metrics.file">
+ <!-- start class org.apache.hadoop.metrics.file.FileContext -->
+ <class name="FileContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of FileContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="getFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the configured file name, or null.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring by opening, in append mode, the
+ file named by the <code>fileName</code> attribute,
+ if specified. Otherwise the data will be written to standard
+ output.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring, closing the file.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Emits a metrics record to a file.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Flushes the output writer, forcing updates to disk.]]>
+ </doc>
+ </method>
+ <field name="FILE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="PERIOD_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Metrics context for writing metrics to a file.<p/>
+
+ This class is configured by setting ContextFactory attributes which in turn
+ are usually configured through a properties file. All the attributes are
+ prefixed by the contextName. For example, the properties file might contain:
+ <pre>
+ myContextName.fileName=/tmp/metrics.log
+ myContextName.period=5
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.file.FileContext -->
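+ <!-- Putting the pieces together: a hadoop-metrics.properties fragment that selects
+      FileContext for a context (the contextName.class attribute is the one described
+      in ContextFactory.getContext above; "myContextName" is a placeholder).
+
+      myContextName.class=org.apache.hadoop.metrics.file.FileContext
+      myContextName.fileName=/tmp/metrics.log
+      myContextName.period=5
+ -->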
+</package>
+<package name="org.apache.hadoop.metrics.ganglia">
+ <!-- start class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+ <class name="GangliaContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GangliaContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of GangliaContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Context for sending metrics to Ganglia.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+</package>
+<package name="org.apache.hadoop.metrics.jvm">
+ <!-- start class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <class name="EventCounter" extends="org.apache.log4j.AppenderSkeleton"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="EventCounter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getFatal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getError" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWarn" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfo" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="requiresLayout" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A log4j Appender that simply counts logging events by level:
+ fatal, error, warn and info.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <!-- start class org.apache.hadoop.metrics.jvm.JvmMetrics -->
+ <class name="JvmMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="init" return="org.apache.hadoop.metrics.jvm.JvmMetrics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="processName" type="java.lang.String"/>
+ <param name="sessionId" type="java.lang.String"/>
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[This will be called periodically (with the period being configuration
+ dependent).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Singleton class which reports Java Virtual Machine metrics to the metrics API.
+ Any application can create an instance of this class in order to emit
+ Java VM metrics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.JvmMetrics -->
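+ <!-- Usage sketch: a daemon typically calls init once at startup (the process name
+      and session id shown are placeholders).
+
+      import org.apache.hadoop.metrics.jvm.JvmMetrics;
+
+      public class DaemonMain {
+        public static void main(String[] args) {
+          JvmMetrics.init("myDaemon", "session-1"); // begins periodic JVM metric updates
+        }
+      }
+ -->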
+</package>
+<package name="org.apache.hadoop.metrics.spi">
+ <!-- start class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
+ <class name="AbstractMetricsContext" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsContext"/>
+ <constructor name="AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of AbstractMetricsContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ <doc>
+ <![CDATA[Initializes the context.]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for subclasses to access factory attributes.]]>
+ </doc>
+ </method>
+ <method name="getAttributeTable" return="java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="tableName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an attribute-value map derived from the factory attributes
+ by finding all factory attributes that begin with
+ <i>contextName</i>.<i>tableName</i>. The returned map consists of
+ those attributes with the contextName and tableName stripped off.]]>
+ </doc>
+ </method>
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.]]>
+ </doc>
+ </method>
+ <method name="getContextFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the factory by which this context was created.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, the emitting of metrics records.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free buffered data.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and frees buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new AbstractMetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="newRecord" return="org.apache.hadoop.metrics.spi.MetricsRecordImpl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Subclasses should override this if they subclass MetricsRecordImpl.
+ @param recordName the name of the record
+ @return newly created instance of MetricsRecordImpl or subclass]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at time intervals determined by
+ the configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sends a record to the metrics system.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called each period after all records have been emitted, this method does nothing.
+ Subclasses may override it in order to perform some kind of flush.]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.update(). Creates or updates a row in
+ the internal table of metric data.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.remove(). Removes all matching rows in
+ the internal table of metric data. A row matches if it has the same
+ tag names and values as record, but it may also have additional
+ tags.]]>
+ </doc>
+ </method>
+ <method name="getPeriod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the timer period.]]>
+ </doc>
+ </method>
+ <method name="setPeriod"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="period" type="int"/>
+ <doc>
+      <![CDATA[Sets the timer period.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The main class of the Service Provider Interface. This class should be
+ extended in order to integrate the Metrics API with a specific metrics
+ client library. <p/>
+
+ This class implements the internal table of metric data, and the timer
+ on which data is to be sent to the metrics system. Subclasses must
+ override the abstract <code>emitRecord</code> method in order to transmit
+ the data. <p/>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
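+  <!-- Implementation sketch (hypothetical subclass name): a minimal context only
+       has to override the abstract emitRecord(); the record table, timer and
+       updater callbacks are inherited from AbstractMetricsContext.
+
+         public class StdoutContext extends AbstractMetricsContext {
+           protected void emitRecord(String contextName, String recordName,
+                                     OutputRecord outRec) throws IOException {
+             // toy sink: print one line per emitted record
+             System.out.println(contextName + "." + recordName + ": "
+                 + outRec.getMetricNames());
+           }
+         }
+  -->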
+ <!-- start class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
+ <class name="MetricsRecordImpl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsRecord"/>
+ <constructor name="MetricsRecordImpl" type="java.lang.String, org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Creates a new instance of MetricsRecordImpl]]>
+ </doc>
+ </constructor>
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes the row, if it exists, in the buffered data table having tags
+ that equal the tags that have been set on this record.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of MetricsRecord. Keeps a back-pointer to the context
+ from which it was created, and delegates back to it on <code>update</code>
+ and <code>remove()</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
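+  <!-- Usage sketch (hypothetical record and tag names): records are normally
+       obtained from a context rather than constructed directly; tags select the
+       row and update() pushes it into the context's buffered table.
+
+         MetricsRecord rec = context.createRecord("dfs.datanode"); // "context" assumed
+         rec.setTag("sessionId", "session-0");
+         rec.incrMetric("bytes_written", 4096);
+         rec.update();
+  -->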
+ <!-- start class org.apache.hadoop.metrics.spi.MetricValue -->
+ <class name="MetricValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricValue" type="java.lang.Number, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricValue]]>
+ </doc>
+ </constructor>
+ <method name="isIncrement" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumber" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="ABSOLUTE" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCREMENT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Number that is either an absolute or an incremental amount.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricValue -->
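+  <!-- Usage sketch (hypothetical values): the boolean constructor argument
+       selects the interpretation via the ABSOLUTE / INCREMENT constants.
+
+         MetricValue abs = new MetricValue(42, MetricValue.ABSOLUTE);
+         MetricValue inc = new MetricValue(1, MetricValue.INCREMENT);
+         boolean isInc = inc.isIncrement();   // true
+  -->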
+ <!-- start class org.apache.hadoop.metrics.spi.NullContext -->
+ <class name="NullContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContext]]>
+ </doc>
+ </constructor>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do-nothing version of startMonitoring]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Null metrics context: a metrics context which does nothing. Used as the
+ default context, so that no performance data is emitted if no configuration
+ data is found.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContext -->
+ <!-- start class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <class name="NullContextWithUpdateThread" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContextWithUpdateThread"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContextWithUpdateThread]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[A null context which has a thread calling the registered updaters
+ periodically when monitoring is started. This keeps the data sampled
+ correctly.
+ In all other respects, this is like the null context: no data is emitted.
+ This is suitable for monitoring systems like JMX, which read the metrics
+ only when someone queries them.
+
+ The default implementations of start and stop monitoring
+ in AbstractMetricsContext are good enough.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <!-- start class org.apache.hadoop.metrics.spi.OutputRecord -->
+ <class name="OutputRecord" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTagNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of tag names]]>
+ </doc>
+ </method>
+ <method name="getTag" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Returns a tag object which can be a String, Integer, Short or Byte.
+
+ @return the tag value, or null if there is no such tag]]>
+ </doc>
+ </method>
+ <method name="getMetricNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of metric names.]]>
+ </doc>
+ </method>
+ <method name="getMetric" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the metric object which can be a Float, Integer, Short or Byte.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents a record of metric data to be sent to a metrics system.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.OutputRecord -->
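+  <!-- Usage sketch (hypothetical, as seen from an emitRecord() implementation):
+       an OutputRecord is read back tag by tag and metric by metric.
+
+         for (String tag : outRec.getTagNames()) {
+           Object v = outRec.getTag(tag);      // String, Integer, Short or Byte
+         }
+         for (String m : outRec.getMetricNames()) {
+           Number n = outRec.getMetric(m);     // Float, Integer, Short or Byte
+         }
+  -->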
+ <!-- start class org.apache.hadoop.metrics.spi.Util -->
+ <class name="Util" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="parse" return="java.util.List&lt;java.net.InetSocketAddress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="specs" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Parses a space and/or comma separated sequence of server specifications
+ of the form <i>hostname</i> or <i>hostname:port</i>. If
+ the specs string is null, defaults to localhost:defaultPort.
+
+ @return a list of InetSocketAddress objects.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Static utility methods]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.Util -->
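+  <!-- Usage sketch (hypothetical spec string): parses "host" or "host:port"
+       entries, filling in the default port where it is omitted.
+
+         List<InetSocketAddress> addrs =
+             Util.parse("ganglia1:8649,ganglia2", 8649);
+  -->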
+</package>
+<package name="org.apache.hadoop.metrics.util">
+ <!-- start class org.apache.hadoop.metrics.util.MBeanUtil -->
+ <class name="MBeanUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MBeanUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="registerMBean" return="javax.management.ObjectName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="serviceName" type="java.lang.String"/>
+ <param name="nameName" type="java.lang.String"/>
+ <param name="theMbean" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Register the mbean using our standard MBeanName format
+ "hadoop.dfs:service=<serviceName>,name=<nameName>"
+ where <serviceName> and <nameName> are the supplied parameters.
+
+ @param serviceName
+ @param nameName
+ @param theMbean - the MBean to register
+ @return the name used to register the MBean]]>
+ </doc>
+ </method>
+ <method name="unregisterMBean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mbeanName" type="javax.management.ObjectName"/>
+ </method>
+ <doc>
+ <![CDATA[This util class provides a method to register an MBean using
+ our standard naming convention as described in the doc
+ for {@link #registerMBean(String, String, Object)}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MBeanUtil -->
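+  <!-- Usage sketch (hypothetical service/name values): registers under
+       "hadoop.dfs:service=<serviceName>,name=<nameName>" and unregisters with
+       the returned ObjectName.
+
+         ObjectName on = MBeanUtil.registerMBean("DataNode", "FSDatasetState",
+                                                 myMBean);  // "myMBean" assumed
+         MBeanUtil.unregisterMBean(on);
+  -->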
+ <!-- start class org.apache.hadoop.metrics.util.MetricsIntValue -->
+ <class name="MetricsIntValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsIntValue" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="int"/>
+ <doc>
+ <![CDATA[Set the value
+ @param newValue]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+      <![CDATA[Inc metrics for incr value
+ @param incr - value to be added]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decr" type="int"/>
+ <doc>
+      <![CDATA[Dec metrics for decr value
+ @param decr - value to subtract]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Dec metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the mr.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsIntValue class is for a metric that is not time varied
+ but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsIntValue -->
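+  <!-- Usage sketch (hypothetical metric name): a gauge-like value that is
+       republished only after it changes; pushMetric() feeds it to a record.
+
+         MetricsIntValue capacity = new MetricsIntValue("capacity");
+         capacity.set(100);
+         capacity.pushMetric(record);   // "record" is an assumed MetricsRecord
+  -->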
+ <!-- start class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <class name="MetricsLongValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsLongValue" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="long"/>
+ <doc>
+ <![CDATA[Set the value
+ @param newValue]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+      <![CDATA[Inc metrics for incr value
+ @param incr - value to be added]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decr" type="long"/>
+ <doc>
+      <![CDATA[Dec metrics for decr value
+ @param decr - value to subtract]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Dec metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the mr.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsLongValue class is for a metric that is not time varied
+ but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
+ <class name="MetricsTimeVaryingInt" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingInt" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+      <![CDATA[Inc metrics for incr value
+ @param incr - number of operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalValue()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Value at the Previous interval
+ @return prev interval value]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingInt class is for a metric that naturally
+ varies over time (e.g. number of files created).
+ The metric is published at each interval heartbeat (the interval
+ is set in the metrics config file).
+ Note if one wants a time associated with the metric then use
+ @see org.apache.hadoop.metrics.util.MetricsTimeVaryingRate]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
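+  <!-- Usage sketch (hypothetical metric name): a counter whose per-interval
+       delta is pushed at each heartbeat.
+
+         MetricsTimeVaryingInt filesCreated =
+             new MetricsTimeVaryingInt("files_created");
+         filesCreated.inc();                 // one more operation this interval
+         filesCreated.pushMetric(record);    // "record" is an assumed MetricsRecord
+  -->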
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
+ <class name="MetricsTimeVaryingRate" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingRate" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param n the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numOps" type="int"/>
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for numOps operations
+ @param numOps - number of operations
+ @param time - time for numOps operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for one operation
+ @param time for one operation]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and
+ {@link #getPreviousIntervalNumOps()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalNumOps" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of operations in the previous interval
+ @return - ops in prev interval]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalAverageTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The average time taken by an operation in the previous interval
+ @return - the average time.]]>
+ </doc>
+ </method>
+ <method name="getMinTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The min time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return min time for an operation]]>
+ </doc>
+ </method>
+ <method name="getMaxTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The max time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return max time for an operation]]>
+ </doc>
+ </method>
+ <method name="resetMinMax"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the min max values]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[The MetricsTimeVaryingRate class is for a rate-based metric that
+ naturally varies over time (e.g. time taken to create a file).
+ The rate is averaged at each interval heartbeat (the interval
+ is set in the metrics config file).
+ This class also keeps track of the min and max rates along with
+ a method to reset the min-max.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
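+  <!-- Usage sketch (hypothetical metric name): each operation reports its
+       duration; the per-interval average is pushed at the heartbeat.
+
+         MetricsTimeVaryingRate createTime = new MetricsTimeVaryingRate("create");
+         long start = System.currentTimeMillis();
+         // ... perform the operation ...
+         createTime.inc(System.currentTimeMillis() - start);
+         createTime.pushMetric(record);   // "record" is an assumed MetricsRecord
+  -->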
+</package>
+<package name="org.apache.hadoop.net">
+ <!-- start class org.apache.hadoop.net.DNS -->
+ <class name="DNS" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DNS"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reverseDns" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hostIp" type="java.net.InetAddress"/>
+ <param name="ns" type="java.lang.String"/>
+ <exception name="NamingException" type="javax.naming.NamingException"/>
+ <doc>
+ <![CDATA[Returns the hostname associated with the specified IP address by the
+ provided nameserver.
+
+ @param hostIp
+ The address to reverse lookup
+ @param ns
+ The host name of a reachable DNS server
+ @return The host name associated with the provided IP
+ @throws NamingException
+ If a NamingException is encountered]]>
+ </doc>
+ </method>
+ <method name="getIPs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the IPs associated with the provided interface, if any, in
+ textual form.
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return A string vector of all the IPs associated with the provided
+ interface
+ @throws UnknownHostException
+ If an UnknownHostException is encountered in querying the
+ default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultIP" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the first available IP address associated with the provided
+ network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The IP address in text form
+ @throws UnknownHostException
+ If one is encountered in querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the provided nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return A string vector of all host names associated with the IPs tied to
+ the specified interface
+ @throws UnknownHostException]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the default nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The list of host names associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the provided
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return The default host names associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the default
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides direct and reverse lookup functionalities, allowing
+ the querying of specific network interfaces or nameservers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.DNS -->
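+  <!-- Usage sketch (hypothetical interface name): resolves the addresses and
+       host name bound to a network interface via the default nameserver.
+
+         String host = DNS.getDefaultHost("eth0");
+         String[] ips = DNS.getIPs("eth0");
+  -->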
+ <!-- start interface org.apache.hadoop.net.DNSToSwitchMapping -->
+ <interface name="DNSToSwitchMapping" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ <doc>
+      <![CDATA[Resolves a list of DNS-names/IP-addresses and returns a list of
+ switch information (network paths). One-to-one correspondence must be
+ maintained between the elements in the lists.
+ Consider an element in the argument list - x.y.com. The switch information
+ that is returned must be a network path of the form /foo/rack,
+ where / is the root, and 'foo' is the switch where 'rack' is connected.
+ Note the hostname/ip-address is not part of the returned path.
+ The network topology of the cluster would determine the number of
+ components in the network path.
+ @param names
+ @return list of resolved network paths]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An interface that should be implemented to allow pluggable
+ DNS-name/IP-address to RackID resolvers.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.DNSToSwitchMapping -->
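+  <!-- Implementation sketch (hypothetical class name): a trivial mapper that
+       places every host in one default rack while preserving the required
+       one-to-one correspondence between input names and returned paths.
+
+         public class SingleRackMapping implements DNSToSwitchMapping {
+           public List<String> resolve(List<String> names) {
+             List<String> paths = new ArrayList<String>(names.size());
+             for (int i = 0; i < names.size(); i++) {
+               paths.add("/default-rack");   // assumed rack path
+             }
+             return paths;
+           }
+         }
+  -->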
+ <!-- start class org.apache.hadoop.net.NetUtils -->
+ <class name="NetUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="clazz" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the socket factory for the given class according to its
+ configuration parameter
+ <tt>hadoop.rpc.socket.factory.class.&lt;ClassName&gt;</tt>. When no
+ such parameter exists then fall back on the default socket factory as
+ configured by <tt>hadoop.rpc.socket.factory.class.default</tt>. If
+ this default socket factory is not configured, then fall back on the JVM
+ default socket factory.
+
+ @param conf the configuration
+ @param clazz the class (usually a {@link VersionedProtocol})
+ @return a socket factory]]>
+ </doc>
+ </method>
+ <method name="getDefaultSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default socket factory as specified by the configuration
+ parameter <tt>hadoop.rpc.socket.factory.class.default</tt>
+
+ @param conf the configuration
+ @return the default socket factory as specified in the configuration or
+ the JVM default socket factory if the configuration does not
+ contain a default socket factory property.]]>
+ </doc>
+ </method>
+ <method name="getSocketFactoryFromProperty" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="propValue" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Get the socket factory corresponding to the given proxy URI. If the
+ given proxy URI corresponds to an absence of the configuration parameter,
+ returns null. If the URI is malformed, an exception is raised.
+
+ @param propValue the property which is the class name of the
+ SocketFactory to instantiate; assumed non null and non empty.
+ @return a socket factory as defined in the property value.]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="getServerAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="oldBindAddressName" type="java.lang.String"/>
+ <param name="oldPortName" type="java.lang.String"/>
+ <param name="newBindAddressName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Handle the transition from pairs of attributes specifying a host and port
+ to a single colon separated one.
+ @param conf the configuration to check
+ @param oldBindAddressName the old address attribute name
+ @param oldPortName the old port attribute name
+ @param newBindAddressName the new combined name
+ @return the complete address from the configuration]]>
+ </doc>
+ </method>
+ <method name="addStaticResolution"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="resolvedName" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Adds a static resolution for host. This can be used to set up
+ fake hostnames that point to a well-known host. For example,
+ some testcases need to have daemons with different hostnames
+ running on the same machine. In order to create connections to these
+ daemons, one can set up mappings from those hostnames to "localhost".
+ {@link NetUtils#getStaticResolution(String)} can be used to query for
+ the actual hostname.
+ @param host
+ @param resolvedName]]>
+ </doc>
+ </method>
+ <method name="getStaticResolution" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Retrieves the resolved name for the passed host. The resolved name must
+ have been set earlier using
+ {@link NetUtils#addStaticResolution(String, String)}
+ @param host
+ @return the resolution]]>
+ </doc>
+ </method>
+ <method name="getAllStaticResolutions" return="java.util.List&lt;java.lang.String[]&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is used to get all the resolutions that were added using
+ {@link NetUtils#addStaticResolution(String, String)}. The return
+ value is a List, each element of which is a String array
+ of the form String[0]=hostname, String[1]=resolved-hostname.
+ @return the list of resolutions]]>
+ </doc>
+ </method>
+ <method name="getConnectAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="server" type="org.apache.hadoop.ipc.Server"/>
+ <doc>
+ <![CDATA[Returns InetSocketAddress that a client can use to
+ connect to the server. Server.getListenerAddress() is not correct when
+ the server binds to "0.0.0.0". This returns "127.0.0.1:port" when
+ getListenerAddress() returns "0.0.0.0:port".
+
+ @param server
+ @return socket address that a client can use to connect to the server.]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getInputStream(socket, socket.getSoTimeout()).<br><br>
+
+ From documentation for {@link #getInputStream(Socket, long)}:<br>
+ Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see #getInputStream(Socket, long)
+
+ @param socket
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. Zero
+ means wait as long as necessary.
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getOutputStream(socket, 0). Timeout of zero implies write will
+ wait until data is available.<br><br>
+
+ From documentation for {@link #getOutputStream(Socket, long)} : <br>
+ Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ data is available.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getOutputStream()}.
+
+ @see #getOutputStream(Socket, long)
+
+ @param socket
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will block as long as
+ necessary.<br><br>
+
+ Any socket created using the socket factories returned by {@link NetUtils}
+ must use this method instead of {@link Socket#getOutputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply; zero
+ means waiting as long as necessary.
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetUtils -->
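+ <!-- A minimal usage sketch for the getInputStream/getOutputStream helpers
+ above (illustrative only; assumes `socket` was created by one of the
+ socket factories NetUtils returns, is already connected, and that the
+ usual java.io/java.net imports are in scope):
+
+ // Read and write through NetUtils so channel-backed sockets get
+ // timeout-aware streams instead of the plain Socket streams.
+ OutputStream out = NetUtils.getOutputStream(socket, 10000); // 10 s write timeout
+ InputStream in = NetUtils.getInputStream(socket, 10000); // 10 s read timeout
+ out.write("ping".getBytes());
+ byte[] reply = new byte[1024];
+ int n = in.read(reply); // honors the timeout when the socket has a channel
+ -->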
+ <!-- start class org.apache.hadoop.net.NetworkTopology -->
+ <class name="NetworkTopology" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetworkTopology"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Add a leaf node.
+ Update the node counter and rack counter if necessary.
+ @param node
+ node to be added
+ @exception IllegalArgumentException if the node is added under a leaf,
+ or the node to be added is not itself a leaf]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Remove a node.
+ Update the node counter and rack counter if necessary.
+ @param node
+ node to be removed]]>
+ </doc>
+ </method>
+ <method name="contains" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if the tree contains node <i>node</i>
+
+ @param node
+ a node
+ @return true if <i>node</i> is already in the tree; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="loc" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a string representation of a node, return its reference
+
+ @param loc
+ a path-like string representation of a node
+ @return a reference to the node; null if the node is not in the tree]]>
+ </doc>
+ </method>
+ <method name="getNumOfRacks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of racks]]>
+ </doc>
+ </method>
+ <method name="getNumOfLeaves" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of nodes]]>
+ </doc>
+ </method>
+ <method name="getDistance" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return the distance between two nodes.
+ It is assumed that the distance from a node to its parent is 1.
+ The distance between two nodes is calculated by summing their distances
+ to their closest common ancestor.
+ @param node1 one node
+ @param node2 another node
+ @return the distance between node1 and node2
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="isOnSameRack" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if two nodes are on the same rack
+ @param node1 one node
+ @param node2 another node
+ @return true if node1 and node2 are on the same rack; false otherwise
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="chooseRandom" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Randomly choose one node from <i>scope</i>.
+ If scope starts with ~, choose one from all nodes except for the
+ ones in <i>scope</i>; otherwise, choose one from <i>scope</i>.
+ @param scope range of nodes from which a node will be chosen
+ @return the chosen node]]>
+ </doc>
+ </method>
+ <method name="countNumOfAvailableNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <param name="excludedNodes" type="java.util.List&lt;org.apache.hadoop.net.Node&gt;"/>
+ <doc>
+ <![CDATA[Return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i>.
+ If scope starts with ~, return the number of nodes that are in neither
+ <i>scope</i> nor <i>excludedNodes</i>.
+ @param scope a path string that may start with ~
+ @param excludedNodes a list of nodes
+ @return number of available nodes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert the network tree to a string]]>
+ </doc>
+ </method>
+ <method name="pseudoSortByDistance"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reader" type="org.apache.hadoop.net.Node"/>
+ <param name="nodes" type="org.apache.hadoop.net.Node[]"/>
+ <doc>
+ <![CDATA[Sort the nodes array by their distances to <i>reader</i>.
+ It linearly scans the array; if a local node is found, it is swapped with
+ the first element of the array.
+ If a local-rack node is found, it is swapped with the first element following
+ the local node.
+ If neither a local node nor a local-rack node is found, a random replica
+ location is put at position 0.
+ The rest of the nodes are left untouched.]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_RACK" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UNRESOLVED" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_HOST_LEVEL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class represents a cluster of computers with a tree-structured,
+ hierarchical network topology.
+ For example, a cluster may consist of many data centers filled
+ with racks of computers.
+ In a network topology, leaves represent data nodes (computers) and inner
+ nodes represent switches/routers that manage traffic in/out of data centers
+ or racks.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetworkTopology -->
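+ <!-- A small sketch of the NetworkTopology API described above (host names,
+ ports and rack paths are made up for illustration):
+
+ NetworkTopology topology = new NetworkTopology();
+ Node a = new NodeBase("host1:50010", "/dc1/rack1");
+ Node b = new NodeBase("host2:50010", "/dc1/rack1");
+ Node c = new NodeBase("host3:50010", "/dc1/rack2");
+ topology.add(a);
+ topology.add(b);
+ topology.add(c);
+ boolean sameRack = topology.isOnSameRack(a, b); // true
+ // Each hop to a parent counts 1, so a and c meet at /dc1: distance 2 + 2 = 4.
+ int dist = topology.getDistance(a, c);
+ Node any = topology.chooseRandom("/dc1"); // a random leaf under /dc1
+ -->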
+ <!-- start interface org.apache.hadoop.net.Node -->
+ <interface name="Node" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the string representation of this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the node's network location]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface defines a node in a network topology.
+ A node may be a leaf representing a data node or an inner
+ node representing a datacenter or rack.
+ Each node has a name, and its location in the network is
+ determined by a string with a syntax similar to a file name.
+ For example, a data node's name is hostname:port# and if it's located at
+ rack "orange" in datacenter "dog", the string representation of its
+ network location is /dog/orange]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.Node -->
+ <!-- start class org.apache.hadoop.net.NodeBase -->
+ <class name="NodeBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <constructor name="NodeBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its path
+ @param path
+ a concatenation of this node's location, the path separator, and its name]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String, org.apache.hadoop.net.Node, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location
+ @param parent this node's parent node
+ @param level this node's level in the tree]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set this node's network location]]>
+ </doc>
+ </method>
+ <method name="getPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return the given node's path]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's string representation]]>
+ </doc>
+ </method>
+ <method name="normalize" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Normalize a path]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree]]>
+ </doc>
+ </method>
+ <field name="PATH_SEPARATOR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PATH_SEPARATOR_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ROOT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="location" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="level" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="parent" type="org.apache.hadoop.net.Node"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class that implements the interface Node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NodeBase -->
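+ <!-- A brief sketch of NodeBase path handling, per the constructor and
+ getPath/normalize docs above (the location string is illustrative):
+
+ Node n = new NodeBase("host1:50010", NodeBase.normalize("/dc1/rack1"));
+ // getPath concatenates the location, the path separator, and the name:
+ String path = NodeBase.getPath(n); // "/dc1/rack1/host1:50010"
+ -->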
+ <!-- start class org.apache.hadoop.net.ScriptBasedMapping -->
+ <class name="ScriptBasedMapping" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.net.DNSToSwitchMapping"/>
+ <constructor name="ScriptBasedMapping"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[This class implements the {@link DNSToSwitchMapping} interface using a
+ script configured via topology.script.file.name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.ScriptBasedMapping -->
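+ <!-- A minimal sketch of configuring ScriptBasedMapping (the script path and
+ host names are hypothetical; the script itself must print a rack for
+ each host name it is given):
+
+ ScriptBasedMapping mapping = new ScriptBasedMapping();
+ Configuration conf = new Configuration();
+ conf.set("topology.script.file.name", "/etc/hadoop/topology.sh");
+ mapping.setConf(conf);
+ List<String> racks =
+ mapping.resolve(Arrays.asList("host1.example.com", "host2.example.com"));
+ // racks.get(i) is the network location resolved for the i-th host
+ -->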
+ <!-- start class org.apache.hadoop.net.SocketInputStream -->
+ <class name="SocketInputStream" extends="java.io.InputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.ReadableByteChannel"/>
+ <constructor name="SocketInputStream" type="java.nio.channels.ReadableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for reading, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), timeout):<br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), socket.getSoTimeout()):
+ <br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.ReadableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this input stream.
+ This is useful in certain cases, such as passing the channel to
+ {@link FileChannel#transferFrom(ReadableByteChannel, long, long)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForReadable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for reading.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an input stream that can have a timeout while reading.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} for the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketOutputStream} for writing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketInputStream -->
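+ <!-- A short sketch of reading with a timeout via SocketInputStream (assumes
+ `socket` has an associated SocketChannel, e.g. one obtained through
+ SocketChannel.open(), and is connected):
+
+ SocketInputStream in = new SocketInputStream(socket, 5000); // 0 = infinite
+ byte[] buf = new byte[8192];
+ // Throws SocketTimeoutException if no data arrives within 5 seconds.
+ int n = in.read(buf, 0, buf.length);
+ -->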
+ <!-- start class org.apache.hadoop.net.SocketOutputStream -->
+ <class name="SocketOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.WritableByteChannel"/>
+ <constructor name="SocketOutputStream" type="java.nio.channels.WritableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for writing, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketOutputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketOutputStream(socket.getChannel(), timeout):<br><br>
+
+ Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketOutputStream#SocketOutputStream(WritableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.WritableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this stream.
+ This is useful in certain cases, such as passing the channel to
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for writing.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="transferToFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileCh" type="java.nio.channels.FileChannel"/>
+ <param name="position" type="long"/>
+ <param name="count" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Transfers data from FileChannel using
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.
+
+ Similar to readFully(), this waits until the requested amount of
+ data is transferred.
+
+ @param fileCh FileChannel to transfer data from.
+ @param position position within the channel where the transfer begins
+ @param count number of bytes to transfer.
+
+ @throws EOFException
+ If the end of the input file is reached before the requested number of
+ bytes is transferred.
+
+ @throws SocketTimeoutException
+ If this channel blocks transfer longer than timeout for
+ this stream.
+
+ @throws IOException Includes any exception thrown by
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an output stream that can have a timeout while writing.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} on the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketInputStream} for reading.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketOutputStream -->
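+ <!-- A sketch of transferToFully, which the docs above describe as a
+ readFully-style wrapper around FileChannel#transferTo (the file path is
+ hypothetical; assumes `socket` has an associated channel):
+
+ SocketOutputStream out = new SocketOutputStream(socket, 5000);
+ FileChannel fileCh = new FileInputStream("/data/block0").getChannel();
+ int count = (int) Math.min(fileCh.size(), Integer.MAX_VALUE);
+ out.transferToFully(fileCh, 0, count); // blocks until count bytes are sent
+ -->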
+ <!-- start class org.apache.hadoop.net.SocksSocketFactory -->
+ <class name="SocksSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="SocksSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <constructor name="SocksSocketFactory" type="java.net.Proxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with a supplied Proxy
+
+ @param proxy the proxy to use to create sockets]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create sockets with a SOCKS proxy]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocksSocketFactory -->
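+ <!-- A minimal sketch of creating sockets through a SOCKS proxy with
+ SocksSocketFactory (the proxy address and target host are illustrative):
+
+ Proxy proxy = new Proxy(Proxy.Type.SOCKS, new InetSocketAddress("localhost", 1080));
+ SocksSocketFactory factory = new SocksSocketFactory(proxy);
+ Socket s = factory.createSocket("namenode.example.com", 8020);
+ -->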
+ <!-- start class org.apache.hadoop.net.StandardSocketFactory -->
+ <class name="StandardSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StandardSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create standard (non-proxied) sockets]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.StandardSocketFactory -->
+</package>
+<package name="org.apache.hadoop.record">
+ <!-- start class org.apache.hadoop.record.BinaryRecordInput -->
+ <class name="BinaryRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="BinaryRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordInput" type="java.io.DataInput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inp" type="java.io.DataInput"/>
+ <doc>
+ <![CDATA[Get a thread-local record input for the supplied DataInput.
+ @param inp data input stream
+ @return binary record input corresponding to the supplied DataInput.]]>
+ </doc>
+ </method>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordInput -->
+ <!-- start class org.apache.hadoop.record.BinaryRecordOutput -->
+ <class name="BinaryRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="BinaryRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordOutput" type="java.io.DataOutput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <doc>
+ <![CDATA[Get a thread-local record output for the supplied DataOutput.
+ @param out data output stream
+ @return binary record output corresponding to the supplied DataOutput.]]>
+ </doc>
+ </method>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordOutput -->
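+ <!-- A round-trip sketch for the binary record I/O classes above (the field
+ tags are arbitrary strings; in-memory streams are used for illustration):
+
+ ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+ BinaryRecordOutput out = new BinaryRecordOutput(bytes);
+ out.writeInt(42, "count");
+ out.writeString("hello", "greeting");
+
+ BinaryRecordInput in =
+ new BinaryRecordInput(new ByteArrayInputStream(bytes.toByteArray()));
+ int count = in.readInt("count"); // 42
+ String greeting = in.readString("greeting"); // "hello"
+ -->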
+ <!-- start class org.apache.hadoop.record.Buffer -->
+ <class name="Buffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Buffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-count sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte array as the initial value.
+
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[], int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte range as the initial value.
+
+ @param bytes A copy of this array becomes the backing storage for the object.
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Use the specified byte array as the underlying sequence.
+
+ @param bytes byte sequence]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Copy the specified byte array to the Buffer. Replaces the current buffer.
+
+ @param bytes byte array to be assigned
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the Buffer.
+
+ @return the backing byte array; the data is only valid between 0 and getCount() - 1.]]>
+ </doc>
+ </method>
+ <method name="getCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current count of the buffer.]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum count that could be handled without
+ resizing the backing storage.
+
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newCapacity" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved if newCapacity >= getCount().
+ @param newCapacity The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the buffer to 0 size]]>
+ </doc>
+ </method>
+ <method name="truncate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Change the capacity of the backing store to be the same as the current
+ count of buffer.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer.
+
+ @param bytes byte array to be appended
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer
+
+ @param bytes byte array to be appended]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Define the sort order of the Buffer.
+
+ @param other The other buffer
+ @return Positive if this is bigger than other, 0 if they are equal, and
+ negative if this is smaller than other.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="charsetName" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ <doc>
+ <![CDATA[Convert the byte buffer to a string using a specific character encoding.
+
+ @param charsetName Valid Java Character Set Name]]>
+ </doc>
+ </method>
+ <method name="clone" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="CloneNotSupportedException" type="java.lang.CloneNotSupportedException"/>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is used as a Java native type for buffer.
+ It is resizable and distinguishes between the count of the sequence and
+ the current capacity.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Buffer -->
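+ <!-- A short sketch of Buffer growth and truncation, per the docs above:
+
+ Buffer buf = new Buffer();
+ buf.append(new byte[] {1, 2, 3});
+ buf.append(new byte[] {4, 5});
+ int count = buf.getCount(); // 5
+ int cap = buf.getCapacity(); // at least 5; backing storage grows as needed
+ buf.truncate(); // shrink capacity down to the current count
+ -->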
+ <!-- start class org.apache.hadoop.record.CsvRecordInput -->
+ <class name="CsvRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="CsvRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordInput -->
+ <!-- start class org.apache.hadoop.record.CsvRecordOutput -->
+ <class name="CsvRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="CsvRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordOutput -->
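+ <!-- A round-trip sketch for the CSV serializers above. Per the docs, tags
+      are only meaningful to tagged formats such as XML, so CSV ignores them;
+      real code would normally go through a generated Record's
+      serialize/deserialize rather than raw field calls:
+
+        import java.io.ByteArrayInputStream;
+        import java.io.ByteArrayOutputStream;
+        import org.apache.hadoop.record.CsvRecordInput;
+        import org.apache.hadoop.record.CsvRecordOutput;
+
+        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+        CsvRecordOutput out = new CsvRecordOutput(bytes);
+        out.writeInt(42, "id");
+        out.writeString("alice", "name");
+
+        CsvRecordInput in =
+            new CsvRecordInput(new ByteArrayInputStream(bytes.toByteArray()));
+        int id = in.readInt("id");              // 42
+        String name = in.readString("name");    // "alice"
+ -->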
+ <!-- start interface org.apache.hadoop.record.Index -->
+ <interface name="Index" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="done" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="incr"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Interface that acts as an iterator for deserializing maps.
+ The deserializer returns an instance that the record uses to
+ read vectors and maps. An example of usage is as follows:
+
+ <code>
+ Index idx = startVector(...);
+ while (!idx.done()) {
+ .... // read element of a vector
+ idx.incr();
+ }
+ </code>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.Index -->
+ <!-- start class org.apache.hadoop.record.Record -->
+ <class name="Record" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Record"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="serialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record with a tag (usually the field name)
+ @param rout Record output destination
+ @param tag record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record with a tag (usually field name)
+ @param rin Record input source
+ @param tag Record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record without a tag
+ @param rout Record output destination]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record without a tag
+ @param rin Record input source]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="din" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Abstract class that is extended by generated classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Record -->
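+ <!-- How the abstract Record plugs into a concrete serializer. MyRecord is a
+      hypothetical class generated by the rcc compiler from a .jr definition:
+
+        import java.io.FileOutputStream;
+        import org.apache.hadoop.record.CsvRecordOutput;
+
+        MyRecord rec = new MyRecord();    // generated subclass of Record
+        CsvRecordOutput out =
+            new CsvRecordOutput(new FileOutputStream("recs.csv"));
+        rec.serialize(out, "MyRecord");   // tagged form; rec.serialize(out)
+                                          // is the untagged equivalent
+ -->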
+ <!-- start class org.apache.hadoop.record.RecordComparator -->
+ <class name="RecordComparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordComparator" type="java.lang.Class"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a raw {@link Record} comparison implementation.]]>
+ </doc>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.record.RecordComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link Record} implementation.
+
+ @param c record class for which a raw comparator is provided
+ @param comparator Raw comparator instance for class c]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A raw record comparator base class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.RecordComparator -->
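+ <!-- Registering an optimized raw comparator via define(). MyRecord and its
+      nested Comparator are hypothetical rcc-generated classes; generated code
+      typically performs this registration in a static initializer:
+
+        import org.apache.hadoop.record.RecordComparator;
+
+        static {
+          RecordComparator.define(MyRecord.class, new MyRecord.Comparator());
+        }
+ -->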
+ <!-- start interface org.apache.hadoop.record.RecordInput -->
+ <interface name="RecordInput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a byte from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a boolean from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a long integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a single-precision float from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a double-precision number from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read byte array from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of elements.]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of map entries.]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the deserializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordInput -->
+ <!-- start interface org.apache.hadoop.record.RecordOutput -->
+ <interface name="RecordOutput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a byte to serialized record.
+ @param b Byte to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a boolean to serialized record.
+ @param b Boolean to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write an integer to serialized record.
+ @param i Integer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a long integer to serialized record.
+ @param l Long to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a single-precision float to serialized record.
+ @param f Float to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a double precision floating point number to serialized record.
+ @param d Double to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a unicode string to serialized record.
+ @param s String to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a buffer to serialized record.
+ @param buf Buffer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a record to be serialized.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized record.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a vector to be serialized.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized vector.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a map to be serialized.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized map.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the serializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordOutput -->
+ <!-- start class org.apache.hadoop.record.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array with the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+ <![CDATA[Get the encoded length when an integer is stored in a variable-length format.
+ @return the encoded length]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ long is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the long that follows
+ is positive, and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the long that follows
+ is negative, and the number of bytes that follow is -(v+120). Bytes are
+ stored in the high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
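+ <!-- A sketch of the encoding rules described above, in Java (it follows the
+      documented byte layout; it is not the library source verbatim):
+
+        static void writeVLong(java.io.DataOutput stream, long i)
+            throws java.io.IOException {
+          if (i >= -112 && i <= 127) {        // small values fit in one byte
+            stream.writeByte((byte) i);
+            return;
+          }
+          int len = -112;
+          if (i < 0) {
+            i ^= -1L;                         // one's complement for negatives
+            len = -120;
+          }
+          for (long tmp = i; tmp != 0; tmp = tmp >>> 8) {
+            len = len - 1;                    // len encodes sign and byte count
+          }
+          stream.writeByte((byte) len);
+          int n = (len < -120) ? -(len + 120) : -(len + 112);
+          for (int idx = n; idx > 0; idx = idx - 1) {
+            int shift = (idx - 1) * 8;        // high non-zero byte first
+            stream.writeByte((byte) ((i >>> shift) & 0xFF));
+          }
+        }
+ -->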
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an int to a binary stream with zero-compressed encoding.
+
+ @param stream Binary output stream
+ @param i int to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <field name="hexchars" type="char[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Various utility functions for the Hadoop record I/O runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Utils -->
+ <!-- start class org.apache.hadoop.record.XmlRecordInput -->
+ <class name="XmlRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="XmlRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Deserializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordInput -->
+ <!-- start class org.apache.hadoop.record.XmlRecordOutput -->
+ <class name="XmlRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="XmlRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Serializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordOutput -->
+</package>
+<package name="org.apache.hadoop.record.compiler">
+ <!-- start class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <class name="CodeBuffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A wrapper around StringBuffer that automatically does indentation]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.Consts -->
+ <class name="Consts" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="RIO_PREFIX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_VAR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER_FIELDS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_OUTPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_INPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TAG" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Constant definitions for the Record I/O compiler.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.Consts -->
+ <!-- start class org.apache.hadoop.record.compiler.JBoolean -->
+ <class name="JBoolean" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBoolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBoolean]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBoolean -->
+ <!-- start class org.apache.hadoop.record.compiler.JBuffer -->
+ <class name="JBuffer" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBuffer]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "buffer" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.JByte -->
+ <class name="JByte" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JByte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "byte" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JByte -->
+ <!-- start class org.apache.hadoop.record.compiler.JDouble -->
+ <class name="JDouble" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JDouble"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JDouble]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JDouble -->
+ <!-- start class org.apache.hadoop.record.compiler.JField -->
+ <class name="JField" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JField" type="java.lang.String, T"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JField]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[A thin wrapper around a record field.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JField -->
+ <!-- start class org.apache.hadoop.record.compiler.JFile -->
+ <class name="JFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFile" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JFile&gt;, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFile
+
+ @param name possibly full pathname to the file
+ @param inclFiles included files (as JFile)
+ @param recList List of records defined within this file]]>
+ </doc>
+ </constructor>
+ <method name="genCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <param name="destDir" type="java.lang.String"/>
+ <param name="options" type="java.util.ArrayList&lt;java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate record code in the given language. The language name
+ should be all lowercase.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Container for the Hadoop Record DDL.
+ The main components of the file are filename, list of included files,
+ and records defined in that file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFile -->
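+ <!-- A sketch of driving code generation through JFile, matching the
+      constructor and genCode signatures above. Normally the rcc parser builds
+      the JFile from a .jr file; the empty lists here only keep the example
+      self-contained:
+
+        import java.util.ArrayList;
+        import org.apache.hadoop.record.compiler.JFile;
+        import org.apache.hadoop.record.compiler.JRecord;
+
+        JFile jfile = new JFile("test.jr",
+            new ArrayList<JFile>(),       // no included files
+            new ArrayList<JRecord>());    // records come from the parser
+        int rc = jfile.genCode("java", "gensrc", new ArrayList<String>());
+ -->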
+ <!-- start class org.apache.hadoop.record.compiler.JFloat -->
+ <class name="JFloat" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFloat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFloat]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFloat -->
+ <!-- start class org.apache.hadoop.record.compiler.JInt -->
+ <class name="JInt" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JInt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JInt]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "int" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JInt -->
+ <!-- start class org.apache.hadoop.record.compiler.JLong -->
+ <class name="JLong" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JLong"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JLong]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "long" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JLong -->
+ <!-- start class org.apache.hadoop.record.compiler.JMap -->
+ <class name="JMap" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JMap" type="org.apache.hadoop.record.compiler.JType, org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JMap]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JMap -->
+ <!-- start class org.apache.hadoop.record.compiler.JRecord -->
+ <class name="JRecord" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JRecord" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JRecord]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JRecord -->
+ <!-- start class org.apache.hadoop.record.compiler.JString -->
+ <class name="JString" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JString"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JString]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JString -->
+ <!-- start class org.apache.hadoop.record.compiler.JType -->
+ <class name="JType" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Abstract Base class for all types supported by Hadoop Record I/O.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JType -->
+ <!-- start class org.apache.hadoop.record.compiler.JVector -->
+ <class name="JVector" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JVector" type="org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JVector]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JVector -->
+</package>
+<package name="org.apache.hadoop.record.compiler.ant">
+ <!-- start class org.apache.hadoop.record.compiler.ant.RccTask -->
+ <class name="RccTask" extends="org.apache.tools.ant.Task"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RccTask"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of RccTask]]>
+ </doc>
+ </constructor>
+ <method name="setLanguage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the output language option
+ @param language "java"/"c++"]]>
+ </doc>
+ </method>
+ <method name="setFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets the record definition file attribute
+ @param file record definition file]]>
+ </doc>
+ </method>
+ <method name="setFailonerror"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="flag" type="boolean"/>
+ <doc>
+ <![CDATA[Given multiple files (via a fileset), sets the error-handling behavior.
+ @param flag if true, throw a build exception on failure (default)]]>
+ </doc>
+ </method>
+ <method name="setDestdir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets directory where output files will be generated
+ @param dir output directory]]>
+ </doc>
+ </method>
+ <method name="addFileset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="set" type="org.apache.tools.ant.types.FileSet"/>
+ <doc>
+ <![CDATA[Adds a fileset that can consist of one or more files
+ @param set Set of record definition files]]>
+ </doc>
+ </method>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="BuildException" type="org.apache.tools.ant.BuildException"/>
+ <doc>
+ <![CDATA[Invoke the Hadoop record compiler on each record definition file]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[Hadoop record compiler Ant task.
+<p> This task takes the given record definition files and compiles them into
+ Java or C++ files. It is then up to the user to compile the generated files.
+
+ <p> The task requires either the <code>file</code> attribute or the nested
+ fileset element to be specified. Optional attributes are <code>language</code>
+ (sets the output language, default is "java"),
+ <code>destdir</code> (name of the destination directory for generated Java/C++
+ code, default is ".") and <code>failonerror</code> (specifies error handling
+ behavior; default is true).
+ <p><h4>Usage</h4>
+ <pre>
+ &lt;recordcc
+ destdir="${basedir}/gensrc"
+ language="java"&gt;
+ &lt;fileset include="**\/*.jr" /&gt;
+ &lt;/recordcc&gt;
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.ant.RccTask -->
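+  <!-- Editor's note: a minimal, hypothetical Java sketch of driving RccTask
+       programmatically; the Project wiring and file names are assumptions,
+       since the task is normally configured declaratively from build.xml as
+       shown in the usage above.
+
+       import org.apache.hadoop.record.compiler.ant.RccTask;
+       import org.apache.tools.ant.Project;
+       import java.io.File;
+
+       public class GenerateRecords {
+         public static void main(String[] args) {
+           RccTask task = new RccTask();
+           task.setProject(new Project());         // Ant tasks need an owning Project
+           task.setLanguage("java");               // output language: "java" or "c++"
+           task.setFile(new File("employee.jr"));  // hypothetical record definition file
+           task.setDestdir(new File("gensrc"));    // where generated sources are written
+           task.setFailonerror(true);              // throw BuildException on failure
+           task.execute();                         // run the Hadoop record compiler
+         }
+       }
+  -->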
+</package>
+<package name="org.apache.hadoop.record.compiler.generated">
+ <!-- start class org.apache.hadoop.record.compiler.generated.ParseException -->
+ <class name="ParseException" extends="java.lang.Exception"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ParseException" type="org.apache.hadoop.record.compiler.generated.Token, int[][], java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constructor is used by the method "generateParseException"
+ in the generated parser. Calling this constructor generates
+ a new object of this type with the fields "currentToken",
+ "expectedTokenSequences", and "tokenImage" set. The boolean
+ flag "specialConstructor" is also set to true to indicate that
+ this constructor was used to create this object.
+ This constructor calls its super class with the empty string
+ to force the "toString" method of parent class "Throwable" to
+ print the error message in the form:
+ ParseException: <result of getMessage>]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The following constructors are for use by you for whatever
+ purpose you can think of. Constructing the exception in this
+ manner makes the exception behave in the normal way - i.e., as
+ documented in the class "Throwable". The fields "errorToken",
+ "expectedTokenSequences", and "tokenImage" do not contain
+ relevant information. The JavaCC generated code does not use
+ these constructors.]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method has the standard behavior when this object has been
+ created using the standard constructors. Otherwise, it uses
+ "currentToken" and "expectedTokenSequences" to generate a parse
+ error message and returns it. If this object has been created
+ due to a parse error, and you do not catch it (it gets thrown
+ from the parser), then this method is called during the printing
+ of the final stack trace, and hence the correct error message
+ gets displayed.]]>
+ </doc>
+ </method>
+ <method name="add_escapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Used to convert raw characters to their escaped versions
+ when the raw versions cannot be used as part of an ASCII
+ string literal.]]>
+ </doc>
+ </method>
+ <field name="specialConstructor" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This variable determines which constructor was used to create
+ this object and thereby affects the semantics of the
+ "getMessage" method (see below).]]>
+ </doc>
+ </field>
+ <field name="currentToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is the last token that has been consumed successfully. If
+ this object has been created due to a parse error, the token
+ following this token will (therefore) be the first error token.]]>
+ </doc>
+ </field>
+ <field name="expectedTokenSequences" type="int[][]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Each entry in this array is an array of integers. Each array
+ of integers represents a sequence of tokens (by their ordinal
+ values) that is expected at this point of the parse.]]>
+ </doc>
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is a reference to the "tokenImage" array of the generated
+ parser within which the parse error occurred. This array is
+ defined in the generated ...Constants interface.]]>
+ </doc>
+ </field>
+ <field name="eol" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The end of line string for this machine.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This exception is thrown when parse errors are encountered.
+ You can explicitly create objects of this exception type by
+ calling the method generateParseException in the generated
+ parser.
+
+ You can modify this class to customize your error reporting
+ mechanisms so long as you retain the public fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.ParseException -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.Rcc -->
+ <class name="Rcc" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="Rcc" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="usage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="driver" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="Input" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Include" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Module" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ModuleName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="RecordList" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Record" return="org.apache.hadoop.record.compiler.JRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Field" return="org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Type" return="org.apache.hadoop.record.compiler.JType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Map" return="org.apache.hadoop.record.compiler.JMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Vector" return="org.apache.hadoop.record.compiler.JVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tm" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"/>
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <method name="generateParseException" return="org.apache.hadoop.record.compiler.generated.ParseException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="enable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="disable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="token_source" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="token" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jj_nt" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Rcc -->
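+  <!-- Editor's note: a hypothetical sketch of invoking the generated parser
+       directly; the file name is an assumption. Input() is the start
+       production and returns the parsed JFile, and ParseException.getMessage()
+       (documented above) reconstructs a readable error from currentToken and
+       expectedTokenSequences.
+
+       import org.apache.hadoop.record.compiler.JFile;
+       import org.apache.hadoop.record.compiler.generated.ParseException;
+       import org.apache.hadoop.record.compiler.generated.Rcc;
+       import java.io.FileInputStream;
+       import java.io.IOException;
+
+       public class ParseRecordFile {
+         public static void main(String[] args) throws IOException {
+           Rcc parser = new Rcc(new FileInputStream("employee.jr"));
+           try {
+             JFile jfile = parser.Input();   // parse the whole definition file
+             System.out.println("parsed: " + jfile);
+           } catch (ParseException e) {
+             System.err.println(e.getMessage());
+           }
+         }
+       }
+  -->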
+ <!-- start interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <interface name="RccConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="EOF" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MODULE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCLUDE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BOOLEAN_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SEMICOLON_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CSTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IDENT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinOneLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinMultiLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
+ <class name="RccTokenManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setDebugStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ds" type="java.io.PrintStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="SwitchTo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="jjFillToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="debugStream" type="java.io.PrintStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjstrLiteralImages" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="lexStateNames" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjnewLexState" type="int[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="input_stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="curChar" type="char"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
+ <class name="SimpleCharStream" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setTabSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="getTabSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="ExpandBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="wrapAround" type="boolean"/>
+ </method>
+ <method name="FillBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="BeginToken" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="UpdateLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="c" type="char"/>
+ </method>
+ <method name="readChar" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getEndColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getEndLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="backup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="amount" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="GetImage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="GetSuffix" return="char[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="int"/>
+ </method>
+ <method name="Done"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="adjustBeginLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newLine" type="int"/>
+ <param name="newCol" type="int"/>
+ <doc>
+ <![CDATA[Method to adjust line and column numbers for the start of a token.]]>
+ </doc>
+ </method>
+ <field name="staticFlag" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufpos" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufline" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufcolumn" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="column" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="line" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsCR" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsLF" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputStream" type="java.io.Reader"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="buffer" type="char[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="maxNextCharInd" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inBuf" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="tabSize" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of interface CharStream, where the stream is assumed to
+ contain only ASCII characters (without unicode processing).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.Token -->
+ <class name="Token" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Token"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the image.]]>
+ </doc>
+ </method>
+ <method name="newToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="ofKind" type="int"/>
+ <doc>
+ <![CDATA[Returns a new Token object, by default. However, if you want, you
+ can create and return subclass objects based on the value of ofKind.
+ Simply add the cases to the switch for all those special cases.
+ For example, if you have a subclass of Token called IDToken that
+ you want to create if ofKind is ID, simply add something like:
+
+ case MyParserConstants.ID : return new IDToken();
+
+ to the following switch statement. Then you can cast matchedToken
+ variable to the appropriate type and use it in your lexical actions.]]>
+ </doc>
+ </method>
+ <field name="kind" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[An integer that describes the kind of this token. This numbering
+ system is determined by JavaCCParser, and a table of these numbers is
+ stored in the file ...Constants.java.]]>
+ </doc>
+ </field>
+ <field name="beginLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="beginColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="image" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The string image of the token.]]>
+ </doc>
+ </field>
+ <field name="next" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A reference to the next regular (non-special) token from the input
+ stream. If this is the last token from the input stream, or if the
+ token manager has not read tokens beyond this one, this field is
+ set to null. This is true only if this token is also a regular
+ token. Otherwise, see below for a description of the contents of
+ this field.]]>
+ </doc>
+ </field>
+ <field name="specialToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This field is used to access special tokens that occur prior to this
+ token, but after the immediately preceding regular (non-special) token.
+ If there are no such special tokens, this field is set to null.
+ When there is more than one such special token, this field refers
+ to the last of these special tokens, which in turn refers to the next
+ previous special token through its specialToken field, and so on
+ until the first special token (whose specialToken field is null).
+ The next fields of special tokens refer to other special tokens that
+ immediately follow it (without an intervening regular token). If there
+ is no such token, this field is null.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Describes the input token stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Token -->
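+  <!-- Editor's note: a hypothetical sketch of the customization described in
+       newToken() above; IDToken is an illustrative subclass name, not part of
+       the generated code.
+
+       public static final Token newToken(int ofKind) {
+         switch (ofKind) {
+           case RccConstants.IDENT_TKN:
+             return new IDToken();   // hypothetical Token subclass for identifiers
+           default:
+             return new Token();
+         }
+       }
+  -->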
+ <!-- start class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
+ <class name="TokenMgrError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TokenMgrError"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="boolean, int, int, int, java.lang.String, char, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addEscapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Replaces unprintable characters by their escaped (or unicode escaped)
+ equivalents in the given string.]]>
+ </doc>
+ </method>
+ <method name="LexicalError" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="EOFSeen" type="boolean"/>
+ <param name="lexState" type="int"/>
+ <param name="errorLine" type="int"/>
+ <param name="errorColumn" type="int"/>
+ <param name="errorAfter" type="java.lang.String"/>
+ <param name="curChar" type="char"/>
+ <doc>
+ <![CDATA[Returns a detailed message for the Error when it is thrown by the
+ token manager to indicate a lexical error.
+ Parameters:
+ EOFSeen : indicates if EOF caused the lexical error
+ lexState : lexical state in which this error occurred
+ errorLine : line number where the error occurred
+ errorColumn : column number where the error occurred
+ errorAfter : prefix that was seen before this error occurred
+ curChar : the offending character
+ Note: You can customize the lexical error message by modifying this method.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[You can also modify the body of this method to customize your error messages.
+ For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not
+ of end-user concern, so you can return something like:
+
+ "Internal Error : Please file a bug report .... "
+
+ from this method for such cases in the release version of your parser.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
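+  <!-- Editor's note: a hypothetical sketch of the getMessage() customization
+       described above, hiding internal lexer states from end users in a
+       release build; the exact message wording is an assumption.
+
+       public String getMessage() {
+         // Cases such as LOOP_DETECTED are of no end-user concern.
+         return "Internal Error: please file a bug report. (" + super.getMessage() + ")";
+       }
+  -->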
+</package>
+<package name="org.apache.hadoop.record.meta">
+ <!-- start class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <class name="FieldTypeInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's TypeID object]]>
+ </doc>
+ </method>
+ <method name="getFieldID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's id (name)]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Two FieldTypeInfos are equal if each of their fields matches]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ti" type="org.apache.hadoop.record.meta.FieldTypeInfo"/>
+ </method>
+ <doc>
+    <![CDATA[Represents type information for a field, which is made up of its
+ ID (name) and its type (a TypeID object).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <!-- start class org.apache.hadoop.record.meta.MapTypeID -->
+ <class name="MapTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapTypeID" type="org.apache.hadoop.record.meta.TypeID, org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getKeyTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's key element]]>
+ </doc>
+ </method>
+ <method name="getValueTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's value element]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two map typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a Map]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.MapTypeID -->
+ <!-- start class org.apache.hadoop.record.meta.RecordTypeInfo -->
+ <class name="RecordTypeInfo" extends="org.apache.hadoop.record.Record"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty RecordTypeInfo object.]]>
+ </doc>
+ </constructor>
+ <constructor name="RecordTypeInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a RecordTypeInfo object representing a record with the given name
+ @param name Name of the record]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the name of the record]]>
+ </doc>
+ </method>
+ <method name="setName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[set the name of the record]]>
+ </doc>
+ </method>
+ <method name="addField"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fieldName" type="java.lang.String"/>
+ <param name="tid" type="org.apache.hadoop.record.meta.TypeID"/>
+ <doc>
+ <![CDATA[Add a field.
+ @param fieldName Name of the field
+ @param tid Type ID of the field]]>
+ </doc>
+ </method>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a collection of field type infos]]>
+ </doc>
+ </method>
+ <method name="getNestedStructTypeInfo" return="org.apache.hadoop.record.meta.RecordTypeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the type info of a nested record. We only consider nesting
+ to one level.
+ @param name Name of the nested record]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer_" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ <doc>
+      <![CDATA[This class doesn't implement Comparable as it's not meant to be used
+ for anything besides de/serializing; comparison is not implemented,
+ and an exception is always thrown.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A record's Type Information object which can read/write itself.
+
+ Type information for a record comprises metadata about the record,
+ as well as a collection of type information for each field in the record.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.RecordTypeInfo -->
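+  <!-- Editor's note: a hypothetical sketch of building and serializing a
+       record's type information; BinaryRecordOutput is assumed as the
+       concrete RecordOutput, and the record/field names are illustrative.
+
+       import org.apache.hadoop.record.BinaryRecordOutput;
+       import org.apache.hadoop.record.meta.RecordTypeInfo;
+       import org.apache.hadoop.record.meta.TypeID;
+       import java.io.ByteArrayOutputStream;
+       import java.io.IOException;
+
+       public class DescribeEmployee {
+         public static void main(String[] args) throws IOException {
+           RecordTypeInfo rti = new RecordTypeInfo("Employee");
+           rti.addField("name", TypeID.StringTypeID);  // shared basic-type constants
+           rti.addField("id", TypeID.IntTypeID);
+           ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+           rti.serialize(new BinaryRecordOutput(bytes), "Employee");
+           // bytes now holds the self-describing type information for the record
+         }
+       }
+  -->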
+ <!-- start class org.apache.hadoop.record.meta.StructTypeID -->
+ <class name="StructTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StructTypeID" type="org.apache.hadoop.record.meta.RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a StructTypeID based on the RecordTypeInfo of some record]]>
+ </doc>
+ </constructor>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a struct]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.StructTypeID -->
+ <!-- start class org.apache.hadoop.record.meta.TypeID -->
+ <class name="TypeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeVal" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type value. One of the constants in RIOType.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two base typeIDs are equal if they refer to the same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <field name="BoolTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constant classes for the basic types, so we can share them.]]>
+ </doc>
+ </field>
+ <field name="BufferTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ByteTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DoubleTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FloatTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IntTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LongTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="StringTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="typeVal" type="byte"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Represents typeID for basic types.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID -->
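+  <!-- Editor's note: a small hypothetical sketch of the equality semantics
+       documented above; the shared constants make basic-type comparisons cheap.
+
+       TypeID a = TypeID.IntTypeID;
+       boolean same = a.equals(TypeID.IntTypeID);        // true: same basic type
+       boolean different = a.equals(TypeID.FloatTypeID); // false
+       byte tag = a.getTypeVal();                        // equals TypeID.RIOType.INT
+  -->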
+ <!-- start class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <class name="TypeID.RIOType" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TypeID.RIOType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <field name="BOOL" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRUCT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+    <![CDATA[Constants representing the IDL types we support.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <!-- start class org.apache.hadoop.record.meta.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <param name="typeID" type="org.apache.hadoop.record.meta.TypeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read/skip bytes from the stream based on the given type]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[Various utility functions for the Hadoop record I/O platform.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.Utils -->
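+  <!-- Editor's note: a hypothetical sketch of skipping an unwanted field
+       while scanning a record stream; rin is assumed to be an already
+       positioned org.apache.hadoop.record.RecordInput.
+
+       Utils.skip(rin, "ignoredField", TypeID.IntTypeID);  // consume one serialized int
+  -->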
+ <!-- start class org.apache.hadoop.record.meta.VectorTypeID -->
+ <class name="VectorTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VectorTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getElementTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two vector typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for vector.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.VectorTypeID -->
+</package>
+<package name="org.apache.hadoop.security">
+ <!-- start class org.apache.hadoop.security.UnixUserGroupInformation -->
+ <class name="UnixUserGroupInformation" extends="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnixUserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameters user name and its group names.
+ The first entry in the groups list is the default group.
+
+ @param userName a user's name
+ @param groupNames groups list, first of which is the default group
+ @exception IllegalArgumentException if any argument is null]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameter user/group names
+
+ @param ugi an array containing user/group names, the first
+ element of which is the user name, the second of
+ which is the default group name.
+ @exception IllegalArgumentException if the array size is less than 2
+ or any element is null.]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Create an immutable {@link UnixUserGroupInformation} object.]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an array of group names]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the user's name]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Deserialize this object.
+ First check whether the input is a UGI in the string format.
+ If not, throw an IOException; otherwise
+ set this object's fields by reading them from the given data input.
+ 
+ @param in input stream
+ @exception IOException if any error is encountered while reading]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Serialize this object.
+ First write a string marking that this is a UGI in the string format,
+ then write this object's serialized form to the given data output.
+ 
+ @param out output stream
+ @exception IOException if any error is encountered during writing]]>
+ </doc>
+ </method>
+ <method name="saveToConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <param name="ugi" type="org.apache.hadoop.security.UnixUserGroupInformation"/>
+ <doc>
+ <![CDATA[Store the given <code>ugi</code> as a comma separated string in
+ <code>conf</code> as a property <code>attr</code>
+
+ The String starts with the user name followed by the default group names,
+ and other group names.
+
+ @param conf configuration
+ @param attr property name
+ @param ugi a UnixUserGroupInformation]]>
+ </doc>
+ </method>
+ <method name="readFromConf" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Read a UGI from the given <code>conf</code>
+
+ The object is expected to be stored with the property name <code>attr</code>
+ as a comma separated string that starts
+ with the user name followed by group names.
+ If the property name is not defined, return null.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the ugi in the map.
+ Otherwise, construct a UGI from the configuration, store it in the
+ ugi map and return it.
+
+ @param conf configuration
+ @param attr property name
+ @return a UnixUGI
+ @throws LoginException if the stored string is ill-formatted.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Get current user's name and the names of all its groups from Unix.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the ugi in the map.
+ Otherwise get the current user's information from Unix, store it
+ in the map, and return it.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Equivalent to login(conf, false).]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="save" type="boolean"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+      <![CDATA[Get a user's name & its group names from the given configuration;
+ if it is not defined in the configuration, get the current user's
+ information from Unix.
+ If the user has a UGI in the ugi map, return the one in
+ the UGI map.
+ 
+ @param conf either a job configuration or client's configuration
+ @param save whether to save it to conf
+ @return UnixUserGroupInformation a user/group information
+ @exception LoginException if not able to get the user/group information]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Decide if two UGIs are the same
+
+ @param other other object
+ @return true if they are the same; false otherwise.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code for this UGI.
+ The hash code for a UGI is the hash code of its user name string.
+
+ @return a hash code value for this UGI.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this object to a string
+
+ @return a comma separated string containing the user name and group names]]>
+ </doc>
+ </method>
+ <field name="UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of UserGroupInformation in the Unix system]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UnixUserGroupInformation -->
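+  <!-- A minimal usage sketch for UnixUserGroupInformation based on the API
+       recorded above; "my.ugi.property" is a hypothetical property name:
+
+         Configuration conf = new Configuration();
+         UnixUserGroupInformation ugi = UnixUserGroupInformation.login(conf, false);
+         UnixUserGroupInformation.saveToConf(conf, "my.ugi.property", ugi);
+         UnixUserGroupInformation sameUgi =
+             UnixUserGroupInformation.readFromConf(conf, "my.ugi.property");
+         // login and readFromConf both throw LoginException on failure
+  -->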
+ <!-- start class org.apache.hadoop.security.UserGroupInformation -->
+ <class name="UserGroupInformation" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="UserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCurrentUGI" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="setCurrentUGI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <doc>
+ <![CDATA[Set the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get username
+
+ @return the user's name]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the names of the groups that the user belongs to
+
+ @return an array of group names]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Login and return a UserGroupInformation object.]]>
+ </doc>
+ </method>
+ <method name="readFrom" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link UserGroupInformation} from conf]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Writable} abstract class for storing user and groups information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UserGroupInformation -->
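+  <!-- A short sketch of the per-thread UGI accessors above (conf assumed to
+       be an org.apache.hadoop.conf.Configuration built elsewhere):
+
+         UserGroupInformation ugi = UserGroupInformation.login(conf);
+         UserGroupInformation.setCurrentUGI(ugi);
+         UserGroupInformation current = UserGroupInformation.getCurrentUGI();
+         // login throws LoginException if the user/group lookup fails
+  -->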
+</package>
+<package name="org.apache.hadoop.tools">
+ <!-- start class org.apache.hadoop.tools.DistCp -->
+ <class name="DistCp" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="DistCp" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="destPath" type="java.lang.String"/>
+ <param name="logPath" type="org.apache.hadoop.fs.Path"/>
+ <param name="srcAsList" type="boolean"/>
+ <param name="ignoreReadFailures" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This is the main driver for recursively copying directories
+ across file systems. It takes at least two command-line parameters: a source
+ URL and a destination URL. It then essentially does an "ls -lR" on the
+ source URL, and writes the output in a round-robin manner to all the map
+ input files. The mapper actually copies the files allotted to it. The
+ reduce is empty.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getRandomId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A Map-reduce program to recursively copy directories between
+ different file-systems.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp -->
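+  <!-- Since DistCp implements Tool, it can be driven through ToolRunner; a
+       hedged sketch with hypothetical source/destination URIs:
+
+         int exitCode = ToolRunner.run(new DistCp(new Configuration()),
+             new String[] { "hdfs://nn1:8020/src", "hdfs://nn2:8020/dst" });
+  -->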
+ <!-- start class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <class name="DistCp.DuplicationException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="ERROR_CODE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Error code for this exception]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An exception class for duplicated source files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <!-- start class org.apache.hadoop.tools.HadoopArchives -->
+ <class name="HadoopArchives" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="HadoopArchives" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="archive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPaths" type="java.util.List&lt;org.apache.hadoop.fs.Path&gt;"/>
+ <param name="archiveName" type="java.lang.String"/>
+ <param name="dest" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Archive the given source paths into
+ the dest.
+ @param srcPaths the src paths to be archived
+ @param archiveName the name of the archive to be created
+ @param dest the dest dir that will contain the archive]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+      <![CDATA[The main driver for creating the archives.
+ It takes at least two command-line parameters: the src and the
+ dest. It does an lsr on the source paths.
+ The mapper creates the archives and the reducer creates
+ the archive index.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+      <![CDATA[The main function.]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[An archive creation utility.
+ This class provides methods that can be used
+ to create hadoop archives. For an understanding of
+ Hadoop archives, look at {@link HarFileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.HadoopArchives -->
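+  <!-- A minimal sketch of the archive(...) call above; the paths and archive
+       name are hypothetical:
+
+         HadoopArchives har = new HadoopArchives(new Configuration());
+         List<Path> srcs = Collections.singletonList(new Path("/user/logs"));
+         har.archive(srcs, "logs.har", new Path("/user/archives"));
+  -->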
+ <!-- start class org.apache.hadoop.tools.Logalyzer -->
+ <class name="Logalyzer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Logalyzer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doArchive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logListURI" type="java.lang.String"/>
+ <param name="archiveDirectory" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doArchive: Workhorse function to archive log-files.
+ @param logListURI : The uri which will serve the list of log-files to archive.
+ @param archiveDirectory : The directory to store archived logfiles.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="doAnalyze"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFilesDirectory" type="java.lang.String"/>
+ <param name="outputDirectory" type="java.lang.String"/>
+ <param name="grepPattern" type="java.lang.String"/>
+ <param name="sortColumns" type="java.lang.String"/>
+ <param name="columnSeparator" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doAnalyze:
+ @param inputFilesDirectory : Directory containing the files to be analyzed.
+ @param outputDirectory : Directory to store analysis (output).
+ @param grepPattern : Pattern to *grep* for.
+ @param sortColumns : Sort specification for output.
+ @param columnSeparator : Column separator.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[Logalyzer: A utility tool for archiving and analyzing hadoop logs.
+ <p>
+ This tool supports archiving and analyzing (sort/grep) of log-files.
+ It takes as input
+ a) Input uri which will serve uris of the logs to be archived.
+ b) Output directory (not mandatory).
+ c) Directory on dfs to archive the logs.
+ d) The sort/grep patterns for analyzing the files and separator for boundaries.
+ Usage:
+ Logalyzer -archive -archiveDir <directory to archive logs> -analysis <directory> -logs <log-list uri> -grep <pattern> -sort <col1, col2> -separator <separator>
+ <p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <class name="Logalyzer.LogComparator" extends="org.apache.hadoop.io.Text.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Logalyzer.LogComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys of the logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+ <class name="Logalyzer.LogRegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="Logalyzer.LogRegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+</package>
+<package name="org.apache.hadoop.util">
+ <!-- start class org.apache.hadoop.util.Daemon -->
+ <class name="Daemon" extends="java.lang.Thread"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Daemon"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.ThreadGroup, java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread to be part of a specified thread group.]]>
+ </doc>
+ </constructor>
+ <method name="getRunnable" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A thread that has called {@link Thread#setDaemon(boolean) } with true.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Daemon -->
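+  <!-- A one-liner sketch: Daemon is just a Thread pre-marked as a daemon, so
+       the JVM may exit without waiting for it:
+
+         new Daemon(new Runnable() {
+           public void run() { /* background housekeeping */ }
+         }).start();
+  -->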
+ <!-- start class org.apache.hadoop.util.DiskChecker -->
+ <class name="DiskChecker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mkdirsWithExistsCheck" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+      <![CDATA[The semantics of the mkdirsWithExistsCheck method differ from the mkdirs
+ method provided in Sun's java.io.File class in the following way:
+ While creating the non-existent parent directories, this method checks for
+ the existence of those directories if the mkdir fails at any point (since
+ that directory might have just been created by some other process).
+ If both the mkdir() and the exists() check fail for any seemingly
+ non-existent directory, then we signal an error; Sun's mkdir would signal
+ an error (return false) if a directory it is attempting to create already
+ exists or the mkdir fails.
+ @param dir
+ @return true on success, false on failure]]>
+ </doc>
+ </method>
+ <method name="checkDir"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ </method>
+ <doc>
+    <![CDATA[Class that provides utility functions for checking disk problems]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker -->
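+  <!-- A hedged sketch of the two static helpers above ("/tmp/scratch" is a
+       hypothetical path):
+
+         boolean made = DiskChecker.mkdirsWithExistsCheck(new File("/tmp/scratch"));
+         try {
+           DiskChecker.checkDir(new File("/tmp/scratch"));
+         } catch (DiskChecker.DiskErrorException e) {
+           // the directory is missing or otherwise unusable
+         }
+  -->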
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <class name="DiskChecker.DiskErrorException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskErrorException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <class name="DiskChecker.DiskOutOfSpaceException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskOutOfSpaceException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <!-- start class org.apache.hadoop.util.GenericOptionsParser -->
+ <class name="GenericOptionsParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Create a <code>GenericOptionsParser</code> to parse only the generic Hadoop
+ arguments.
+
+ The array of string arguments other than the generic arguments can be
+ obtained by {@link #getRemainingArgs()}.
+
+ @param conf the <code>Configuration</code> to modify.
+ @param args command-line arguments.]]>
+ </doc>
+ </constructor>
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, org.apache.commons.cli.Options, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a <code>GenericOptionsParser</code> to parse given options as well
+ as generic Hadoop options.
+
+ The resulting <code>CommandLine</code> object can be obtained by
+ {@link #getCommandLine()}.
+
+ @param conf the configuration to modify
+ @param options options built by the caller
+ @param args User-specified arguments]]>
+ </doc>
+ </constructor>
+ <method name="getRemainingArgs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array of Strings containing only application-specific arguments.
+
+ @return array of <code>String</code>s containing the un-parsed arguments
+ or <strong>empty array</strong> if commandLine was not defined.]]>
+ </doc>
+ </method>
+ <method name="getCommandLine" return="org.apache.commons.cli.CommandLine"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the commons-cli <code>CommandLine</code> object
+ to process the parsed arguments.
+
+ Note: If the object is created with
+ {@link #GenericOptionsParser(Configuration, String[])}, then the returned
+ object will only contain parsed generic options.
+
+ @return <code>CommandLine</code> representing list of arguments
+ parsed against Options descriptor.]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Print the usage message for generic command-line options supported.
+
+ @param out stream to print the usage message to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>GenericOptionsParser</code> is a utility to parse command line
+ arguments generic to the Hadoop framework.
+
+ <code>GenericOptionsParser</code> recognizes several standard command
+ line arguments, enabling applications to easily specify a namenode, a
+ jobtracker, additional configuration resources etc.
+
+ <h4 id="GenericOptions">Generic Options</h4>
+
+ <p>The supported generic options are:</p>
+ <p><blockquote><pre>
+ -conf &lt;configuration file&gt; specify a configuration file
+ -D &lt;property=value&gt; use value for given property
+ -fs &lt;local|namenode:port&gt; specify a namenode
+ -jt &lt;local|jobtracker:port&gt; specify a job tracker
+ -files &lt;comma separated list of files&gt; specify comma separated
+ files to be copied to the map reduce cluster
+ -libjars &lt;comma separated list of jars&gt; specify comma separated
+ jar files to include in the classpath.
+ -archives &lt;comma separated list of archives&gt; specify comma
+ separated archives to be unarchived on the compute machines.
+
+ </pre></blockquote></p>
+
+ <p>The general command line syntax is:</p>
+ <p><tt><pre>
+ bin/hadoop command [genericOptions] [commandOptions]
+ </pre></tt></p>
+
+ <p>Generic command line arguments <strong>might</strong> modify
+ <code>Configuration</code> objects given to constructors.</p>
+
+ <p>The functionality is implemented using Commons CLI.</p>
+
+ <p>Examples:</p>
+ <p><blockquote><pre>
+ $ bin/hadoop dfs -fs darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -conf hadoop-site.xml -ls /data
+ list /data directory in dfs with conf specified in hadoop-site.xml
+
+ $ bin/hadoop job -D mapred.job.tracker=darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt local -submit job.xml
+ submit a job to local runner
+
+ $ bin/hadoop jar -libjars testlib.jar
+ -archives test.tgz -files file.txt inputjar args
+ job submission with libjars, files and archives
+ </pre></blockquote></p>
+
+ @see Tool
+ @see ToolRunner]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericOptionsParser -->
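+  <!-- A minimal sketch of the parsing flow described above (args assumed to
+       be the String[] passed to main):
+
+         Configuration conf = new Configuration();
+         GenericOptionsParser parser = new GenericOptionsParser(conf, args);
+         String[] appArgs = parser.getRemainingArgs();
+         // conf now reflects any -D/-conf/-fs/-jt generic options
+  -->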
+ <!-- start class org.apache.hadoop.util.GenericsUtil -->
+ <class name="GenericsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericsUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClass" return="java.lang.Class&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <doc>
+ <![CDATA[Returns the Class object (of type <code>Class&lt;T&gt;</code>) of the
+ argument of type <code>T</code>.
+ @param <T> The type of the argument
+ @param t the object whose class is to be returned
+ @return <code>Class&lt;T&gt;</code>]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+      <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param c the Class object of the items in the list
+ @param list the list to convert]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+      <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param list the list to convert
+ @throws ArrayIndexOutOfBoundsException if the list is empty.
+ Use {@link #toArray(Class, List)} if the list may be empty.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Contains utility methods for dealing with Java Generics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericsUtil -->
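+  <!-- A short sketch of the two toArray variants above:
+
+         List<String> names = Arrays.asList("a", "b");
+         String[] byClass = GenericsUtil.toArray(String.class, names); // safe for empty lists
+         String[] byItem  = GenericsUtil.toArray(names); // throws if the list is empty
+  -->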
+ <!-- start class org.apache.hadoop.util.HeapSort -->
+ <class name="HeapSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="HeapSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using heap sort.
+ {@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of HeapSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.HeapSort -->
+ <!-- start class org.apache.hadoop.util.HostsFileReader -->
+ <class name="HostsFileReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HostsFileReader" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="refresh"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExcludedHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.util.HostsFileReader -->
+ <!-- start interface org.apache.hadoop.util.IndexedSortable -->
+ <interface name="IndexedSortable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Compare items at the given addresses consistent with the semantics of
+ {@link java.util.Comparator#compare}.]]>
+ </doc>
+ </method>
+ <method name="swap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Swap items at the given addresses.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for collections capable of being sorted by {@link IndexedSorter}
+ algorithms.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSortable -->
+ <!-- start interface org.apache.hadoop.util.IndexedSorter -->
+ <interface name="IndexedSorter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the items accessed through the given IndexedSortable over the given
+ range of logical indices. From the perspective of the sort algorithm,
+ each index between l (inclusive) and r (exclusive) is an addressable
+ entry.
+ @see IndexedSortable#compare
+ @see IndexedSortable#swap]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Same as {@link #sort(IndexedSortable,int,int)}, but indicate progress
+ periodically.
+ @see #sort(IndexedSortable,int,int)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for sort algorithms accepting {@link IndexedSortable} items.
+
+ A sort algorithm implementing this interface may only
+ {@link IndexedSortable#compare} and {@link IndexedSortable#swap} items
+ for a range of indices to effect a sort across that range.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSorter -->
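+  <!-- A hedged sketch tying IndexedSortable to an IndexedSorter: sort a plain
+       int[] through compare/swap callbacks (HeapSort from above; QuickSort
+       would work identically):
+
+         final int[] a = { 5, 1, 4, 2 };
+         IndexedSortable s = new IndexedSortable() {
+           public int compare(int i, int j) { return a[i] - a[j]; }
+           public void swap(int i, int j) { int t = a[i]; a[i] = a[j]; a[j] = t; }
+         };
+         new HeapSort().sort(s, 0, a.length); // a is now { 1, 2, 4, 5 }
+  -->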
+ <!-- start class org.apache.hadoop.util.MergeSort -->
+ <class name="MergeSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MergeSort" type="java.util.Comparator&lt;org.apache.hadoop.io.IntWritable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mergeSort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="int[]"/>
+ <param name="dest" type="int[]"/>
+ <param name="low" type="int"/>
+ <param name="high" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of MergeSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.MergeSort -->
+ <!-- start class org.apache.hadoop.util.NativeCodeLoader -->
+ <class name="NativeCodeLoader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeCodeLoader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeCodeLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if native-hadoop code is loaded for this platform.
+
+ @return <code>true</code> if native-hadoop is loaded,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getLoadNativeLibraries" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+      <![CDATA[Return whether native hadoop libraries, if present, can be used for this job.
+ @param jobConf job configuration
+
+ @return <code>true</code> if native hadoop libraries, if present, can be
+ used for this job; <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setLoadNativeLibraries"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="loadNativeLibraries" type="boolean"/>
+ <doc>
+      <![CDATA[Set whether native hadoop libraries, if present, can be used for this job.
+
+ @param jobConf job configuration
+ @param loadNativeLibraries can native hadoop libraries be loaded]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A helper to load the native hadoop code i.e. libhadoop.so.
+ This handles the fallback to either the bundled libhadoop-Linux-i386-32.so
+ or the default java implementations where appropriate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.NativeCodeLoader -->
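+  <!-- A brief sketch of the loader checks above (jobConf assumed to be a
+       JobConf built elsewhere; note the get/set methods are instance methods):
+
+         if (NativeCodeLoader.isNativeCodeLoaded()) {
+           NativeCodeLoader loader = new NativeCodeLoader();
+           loader.setLoadNativeLibraries(jobConf, true);
+           boolean usable = loader.getLoadNativeLibraries(jobConf);
+         }
+  -->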
+ <!-- start class org.apache.hadoop.util.PlatformName -->
+ <class name="PlatformName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PlatformName"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPlatformName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete platform as per the java-vm.
+ @return the complete platform as per the java-vm.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[A helper class for getting build-info of the java-vm.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PlatformName -->
+ <!-- start class org.apache.hadoop.util.PrintJarMainClass -->
+ <class name="PrintJarMainClass" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PrintJarMainClass"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A micro-application that prints the main class name out of a jar file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PrintJarMainClass -->
+ <!-- start class org.apache.hadoop.util.PriorityQueue -->
+ <class name="PriorityQueue" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PriorityQueue"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="lessThan" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Determines the ordering of objects in this priority queue. Subclasses
+ must define this one method.]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="maxSize" type="int"/>
+ <doc>
+ <![CDATA[Subclass constructors must call this.]]>
+ </doc>
+ </method>
+ <method name="put"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Adds an Object to a PriorityQueue in log(size) time.
+ If one tries to add more objects than the maxSize passed to initialize,
+ a RuntimeException (ArrayIndexOutOfBoundsException) is thrown.]]>
+ </doc>
+ </method>
+ <method name="insert" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Adds element to the PriorityQueue in log(size) time if either
+ the PriorityQueue is not full, or not lessThan(element, top()).
+ @param element
+ @return true if element is added, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="top" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the least element of the PriorityQueue in constant time.]]>
+ </doc>
+ </method>
+ <method name="pop" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes and returns the least element of the PriorityQueue in log(size)
+ time.]]>
+ </doc>
+ </method>
+ <method name="adjustTop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should be called when the Object at top changes values. Still log(n)
+ worst case, but it's at least twice as fast to <pre>
+ { pq.top().change(); pq.adjustTop(); }
+ </pre> instead of <pre>
+ { o = pq.pop(); o.change(); pq.put(o); }
+ </pre>]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of elements currently stored in the PriorityQueue.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes all entries from the PriorityQueue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A PriorityQueue maintains a partial ordering of its elements such that the
+ least element can always be found in constant time. Put()'s and pop()'s
+ require log(size) time.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PriorityQueue -->
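+  <!-- A minimal subclass sketch: per the docs above, lessThan defines the
+       ordering and the constructor must call initialize(maxSize):
+
+         class IntQueue extends PriorityQueue {
+           IntQueue(int maxSize) { initialize(maxSize); }
+           protected boolean lessThan(Object a, Object b) {
+             return ((Integer) a).intValue() < ((Integer) b).intValue();
+           }
+         }
+         IntQueue q = new IntQueue(8);
+         q.put(Integer.valueOf(3));
+         q.put(Integer.valueOf(1));
+         Object least = q.top(); // 1, in constant time
+         Object out = q.pop();   // removes 1 in log(size) time
+  -->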
+ <!-- start class org.apache.hadoop.util.ProgramDriver -->
+ <class name="ProgramDriver" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ProgramDriver"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="mainClass" type="java.lang.Class"/>
+ <param name="description" type="java.lang.String"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+      <![CDATA[This is the method that adds a class to the repository.
+ @param name The name with which you want the class instance to be invoked
+ @param mainClass The class that you want to add to the repository
+ @param description The description of the class
+ @throws NoSuchMethodException
+ @throws SecurityException]]>
+ </doc>
+ </method>
+ <method name="driver"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[This is a driver for the example programs.
+ It looks at the first command line argument and tries to find an
+ example program with that name.
+ If it is found, it calls the main method in that class with the rest
+ of the command line arguments.
+ @param args The argument from the user. args[0] is the command to run.
+ @throws NoSuchMethodException
+ @throws SecurityException
+ @throws IllegalAccessException
+ @throws IllegalArgumentException
+ @throws Throwable Anything thrown by the example program's main]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[A driver that is used to run programs added to it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ProgramDriver -->
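+  <!-- A hedged sketch of driving example programs (WordCount is a
+       hypothetical class with a main method):
+
+         ProgramDriver pgd = new ProgramDriver();
+         pgd.addClass("wordcount", WordCount.class, "counts the words in the input");
+         pgd.driver(args); // both calls may throw Throwable
+  -->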
+ <!-- start class org.apache.hadoop.util.Progress -->
+ <class name="Progress" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Progress"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new root node.]]>
+ </doc>
+ </constructor>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a named node to the tree.]]>
+ </doc>
+ </method>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds a node to the tree.]]>
+ </doc>
+ </method>
+ <method name="startNextPhase"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Called during execution to move to the next phase at this level in the
+ tree.]]>
+ </doc>
+ </method>
+ <method name="phase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current sub-node executing.]]>
+ </doc>
+ </method>
+ <method name="complete"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Completes this node, moving the parent node to its next child.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progress" type="float"/>
+ <doc>
+ <![CDATA[Called during execution on a leaf node to set its progress.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the overall progress of the root.]]>
+ </doc>
+ </method>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Utility to assist with generation of progress reports. Applications build
+ a hierarchy of {@link Progress} instances, each modelling a phase of
+ execution. The root is constructed with {@link #Progress()}. Nodes for
+ sub-phases are created by calling {@link #addPhase()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Progress -->
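+  <!-- A small sketch of the phase tree described above: a root with two named
+       sub-phases, reporting weighted progress:
+
+         Progress root = new Progress();
+         Progress copy = root.addPhase("copy");
+         Progress sort = root.addPhase("sort");
+         copy.set(0.5f);             // leaf progress in [0, 1]
+         float overall = root.get(); // 0.25f: halfway through the first of two phases
+         root.startNextPhase();      // advance to the "sort" phase
+  -->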
+ <!-- start interface org.apache.hadoop.util.Progressable -->
+ <interface name="Progressable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Report progress to the Hadoop framework.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for reporting progress.
+
+ <p>Clients and/or applications can use the provided <code>Progressable</code>
+ to explicitly report progress to the Hadoop framework. This is especially
+ important for operations which take a significant amount of time since,
+ in lieu of the reported progress, the framework has to assume that an error
+ has occurred and time-out the operation.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Progressable -->
+ <!-- start class org.apache.hadoop.util.QuickSort -->
+ <class name="QuickSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="QuickSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMaxDepth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="x" type="int"/>
+ <doc>
+ <![CDATA[Deepest recursion before giving up and doing a heapsort.
+ Returns 2 * ceil(log(n)).]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using quick sort.
+      {@inheritDoc} If the recursion depth exceeds {@link #getMaxDepth},
+ then switch to {@link HeapSort}.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of QuickSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.QuickSort -->
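+  <!-- Usage sketch (illustrative; QuickSortDemo is hypothetical, and
+       IndexedSortable is assumed to expose compare(int,int) and swap(int,int)):
+       sorting a plain int array through the indexed interface.
+
+         import org.apache.hadoop.util.IndexedSortable;
+         import org.apache.hadoop.util.QuickSort;
+
+         public class QuickSortDemo {
+           public static void main(String[] args) {
+             final int[] a = {5, 3, 9, 1};
+             IndexedSortable s = new IndexedSortable() {
+               public int compare(int i, int j) {
+                 return a[i] < a[j] ? -1 : (a[i] == a[j] ? 0 : 1);
+               }
+               public void swap(int i, int j) {
+                 int t = a[i]; a[i] = a[j]; a[j] = t;
+               }
+             };
+             new QuickSort().sort(s, 0, a.length);  // sorts the index range [0, a.length)
+             System.out.println(java.util.Arrays.toString(a));  // [1, 3, 5, 9]
+           }
+         }
+  -->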
+ <!-- start class org.apache.hadoop.util.ReflectionUtils -->
+ <class name="ReflectionUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReflectionUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theObject" type="java.lang.Object"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check and set 'configuration' if necessary.
+
+ @param theObject object for which to set configuration
+ @param conf Configuration]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create an object for the given class and initialize it from conf
+
+ @param theClass class of which an object is created
+ @param conf Configuration
+ @return a new object]]>
+ </doc>
+ </method>
+ <method name="setContentionTracing"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="boolean"/>
+ </method>
+ <method name="printThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.PrintWriter"/>
+ <param name="title" type="java.lang.String"/>
+ <doc>
+        <![CDATA[Print all of the threads' information and stack traces.
+
+    @param stream the stream to write to
+    @param title a string title for the stack trace]]>
+ </doc>
+ </method>
+ <method name="logThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="title" type="java.lang.String"/>
+ <param name="minInterval" type="long"/>
+ <doc>
+ <![CDATA[Log the current thread stacks at INFO level.
+ @param log the logger that logs the stack trace
+ @param title a descriptive title for the call stacks
+    @param minInterval the minimum interval since the stacks were last logged]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="T"/>
+ <doc>
+ <![CDATA[Return the correctly-typed {@link Class} of the given object.
+
+ @param o object whose correctly-typed <code>Class</code> is to be obtained
+ @return the correctly typed <code>Class</code> of the given object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[General reflection utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ReflectionUtils -->
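+  <!-- Usage sketch (illustrative; NewInstanceDemo is hypothetical): newInstance
+       instantiates any class with a no-arg constructor and, when the class is
+       Configurable, also injects the given Configuration.
+
+         import java.util.ArrayList;
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.util.ReflectionUtils;
+
+         public class NewInstanceDemo {
+           public static void main(String[] args) {
+             Configuration conf = new Configuration();
+             Object list = ReflectionUtils.newInstance(ArrayList.class, conf);
+             System.out.println(list.getClass().getName());  // java.util.ArrayList
+           }
+         }
+  -->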
+ <!-- start class org.apache.hadoop.util.RunJar -->
+ <class name="RunJar" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RunJar"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="unJar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jarFile" type="java.io.File"/>
+ <param name="toDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unpack a jar file into a directory.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Run a Hadoop job jar. If the main class is not in the jar's manifest,
+ then it must be provided on the command line.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Run a Hadoop job jar.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.RunJar -->
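+  <!-- Usage sketch (illustrative; the file names job.jar and unpacked are
+       placeholders): unpacking a jar programmatically with RunJar.unJar.
+
+         import java.io.File;
+         import java.io.IOException;
+         import org.apache.hadoop.util.RunJar;
+
+         public class UnJarDemo {
+           public static void main(String[] args) throws IOException {
+             // Expands the jar's entries into the target directory.
+             RunJar.unJar(new File("job.jar"), new File("unpacked"));
+           }
+         }
+  -->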
+ <!-- start class org.apache.hadoop.util.ServletUtil -->
+ <class name="ServletUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ServletUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initHTML" return="java.io.PrintWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="response" type="javax.servlet.ServletResponse"/>
+ <param name="title" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initial HTML header]]>
+ </doc>
+ </method>
+ <method name="getParameter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.ServletRequest"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a parameter from a ServletRequest.
+ Return null if the parameter contains only white spaces.]]>
+ </doc>
+ </method>
+ <method name="htmlFooter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[HTML footer to be added to the JSPs.
+ @return the HTML footer.]]>
+ </doc>
+ </method>
+ <field name="HTML_TAIL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.util.ServletUtil -->
+ <!-- start class org.apache.hadoop.util.Shell -->
+ <class name="Shell" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param interval the minimum duration to wait before re-executing the
+ command.]]>
+ </doc>
+ </constructor>
+ <method name="getGROUPS_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's groups list]]>
+ </doc>
+ </method>
+ <method name="getGET_PERMISSION_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a Unix command to get permission information.]]>
+ </doc>
+ </method>
+ <method name="getUlimitMemoryCommand" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the Unix command for setting the maximum virtual memory available
+ to a given child process. This is only relevant when we are forking a
+ process from within the {@link org.apache.hadoop.mapred.Mapper} or the
+ {@link org.apache.hadoop.mapred.Reducer} implementations
+ e.g. <a href="{@docRoot}/org/apache/hadoop/mapred/pipes/package-summary.html">Hadoop Pipes</a>
+ or <a href="{@docRoot}/org/apache/hadoop/streaming/package-summary.html">Hadoop Streaming</a>.
+
+    It also checks that we are running on a *nix platform; otherwise
+    (e.g. in Cygwin/Windows) it returns <code>null</code>.
+ @param job job configuration
+ @return a <code>String[]</code> with the ulimit command arguments or
+ <code>null</code> if we are running on a non *nix platform or
+ if the limit is unspecified.]]>
+ </doc>
+ </method>
+ <method name="setEnvironment"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="env" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[set the environment for the command
+ @param env Mapping of environment variables]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[set the working directory
+ @param dir The directory where the command would be executed]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[check to see if a command needs to be executed and execute if needed]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return an array containing the command name & its parameters]]>
+ </doc>
+ </method>
+ <method name="parseExecResult"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the execution result]]>
+ </doc>
+ </method>
+ <method name="getProcess" return="java.lang.Process"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the current sub-process executing the given command
+ @return process executing the command]]>
+ </doc>
+ </method>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the exit code
+ @return the exit code of the process]]>
+ </doc>
+ </method>
+ <method name="execCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Static method to execute a shell command.
+    Covers most of the simple cases without requiring the user to
+    subclass <code>Shell</code>.
+ @param cmd shell command to execute.
+ @return the output of the executed command.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USER_NAME_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's name]]>
+ </doc>
+ </field>
+ <field name="SET_PERMISSION_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set permission]]>
+ </doc>
+ </field>
+ <field name="SET_OWNER_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set owner]]>
+ </doc>
+ </field>
+ <field name="SET_GROUP_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WINDOWS" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set to true on Windows platforms]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A base class for running a Unix command.
+
+ <code>Shell</code> can be used to run unix commands like <code>du</code> or
+ <code>df</code>. It also offers facilities to gate commands by
+ time-intervals.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell -->
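+  <!-- Usage sketch (illustrative; assumes a Unix environment where "ls" exists):
+       the static Shell.execCommand helper for one-off commands.
+
+         import java.io.IOException;
+         import org.apache.hadoop.util.Shell;
+
+         public class ExecDemo {
+           public static void main(String[] args) throws IOException {
+             // The command is an argv-style String[]; captured stdout is returned.
+             String out = Shell.execCommand(new String[] {"ls", "-l"});
+             System.out.print(out);
+           }
+         }
+  -->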
+ <!-- start class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <class name="Shell.ExitCodeException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ExitCodeException" type="int, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is an IOException with exit code added.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <!-- start class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
+ <class name="Shell.ShellCommandExecutor" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File, java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the shell command.]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getOutput" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the output of the shell command.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple shell command executor.
+
+    <code>ShellCommandExecutor</code> should be used in cases where the output
+    of the command needs no explicit parsing and where the command, working
+    directory and the environment remain unchanged. The output of the command
+    is stored as-is and is expected to be small.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
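+  <!-- Usage sketch (illustrative; assumes a Unix environment where "df" exists):
+       running a fixed command and reading back its exit code and output.
+
+         import java.io.IOException;
+         import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+
+         public class DfDemo {
+           public static void main(String[] args) throws IOException {
+             ShellCommandExecutor exec =
+                 new ShellCommandExecutor(new String[] {"df", "-k"});
+             exec.execute();  // runs the command and buffers its stdout
+             System.out.println("exit=" + exec.getExitCode());
+             System.out.print(exec.getOutput());
+           }
+         }
+  -->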
+ <!-- start class org.apache.hadoop.util.StringUtils -->
+ <class name="StringUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StringUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stringifyException" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Make a string representation of the exception.
+ @param e The exception to stringify
+ @return A string with exception name and call stack.]]>
+ </doc>
+ </method>
+ <method name="simpleHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fullHostname" type="java.lang.String"/>
+ <doc>
+        <![CDATA[Given a full hostname, return the portion up to the first dot.
+    @param fullHostname the full hostname
+    @return the hostname up to the first dot]]>
+ </doc>
+ </method>
+ <method name="humanReadableInt" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="number" type="long"/>
+ <doc>
+ <![CDATA[Given an integer, return a string that is in an approximate, but human
+ readable format.
+ It uses the bases 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3.
+ @param number the number to format
+ @return a human readable form of the integer]]>
+ </doc>
+ </method>
+ <method name="formatPercent" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="done" type="double"/>
+ <param name="digits" type="int"/>
+ <doc>
+ <![CDATA[Format a percentage for presentation to the user.
+ @param done the percentage to format (0.0 to 1.0)
+ @param digits the number of digits past the decimal point
+ @return a string representation of the percentage]]>
+ </doc>
+ </method>
+ <method name="arrayToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strs" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Given an array of strings, return a comma-separated list of its elements.
+ @param strs Array of strings
+ @return Empty string if strs.length is 0, comma separated list of strings
+ otherwise]]>
+ </doc>
+ </method>
+ <method name="byteToHexString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+        <![CDATA[Convert an array of bytes to its hex string representation.
+    @param bytes the bytes to convert
+    @return hex string representation of the byte array]]>
+ </doc>
+ </method>
+ <method name="hexStringToByte" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+        <![CDATA[Given a hex string, return the corresponding byte array.
+    @param hex the hex string
+    @return the byte array encoded by the given hex string; its size is
+    therefore hex.length/2]]>
+ </doc>
+ </method>
+ <method name="uriToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uris" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[@param uris]]>
+ </doc>
+ </method>
+ <method name="stringToURI" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param str]]>
+ </doc>
+ </method>
+ <method name="stringToPath" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param str]]>
+ </doc>
+ </method>
+ <method name="formatTimeDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+        <![CDATA[Given a finish and a start time in milliseconds, returns a
+    String in the format Xhrs, Ymins, Zsec for the time difference between the two times.
+    If the finish time comes before the start time, negative values of X, Y and Z
+    will be returned.
+
+    @param finishTime finish time
+    @param startTime start time]]>
+ </doc>
+ </method>
+ <method name="getFormattedTimeWithDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dateFormat" type="java.text.DateFormat"/>
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+        <![CDATA[Formats time in ms and appends the difference (finishTime - startTime),
+    as returned by formatTimeDiff().
+    If the finish time is 0, an empty string is returned; if the start time is 0,
+    the difference is not appended to the return value.
+    @param dateFormat date format to use
+    @param finishTime finish time
+    @param startTime start time
+ @return formatted value.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+        <![CDATA[Returns an array of strings.
+    @param str the comma separated string values
+    @return the array of comma separated string values]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a collection of strings.
+    @param str comma separated string values
+ @return an <code>ArrayList</code> of string values]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Split a string using the default separator
+    @param str a string that may have escaped separators
+ @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="separator" type="char"/>
+ <doc>
+ <![CDATA[Split a string using the given separator
+    @param str a string that may have escaped separators
+    @param escapeChar a char that can be used to escape the separator
+    @param separator a separator char
+ @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Escape commas in the string using the default escape char
+ @param str a string
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Escape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the char to be escaped
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Unescape commas in the string using the default escape char
+ @param str a string
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Unescape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the escaped char
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="getHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return hostname without throwing exception.
+ @return hostname]]>
+ </doc>
+ </method>
+ <method name="startupShutdownMessage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <param name="args" type="java.lang.String[]"/>
+ <param name="LOG" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Print a log message for starting up and shutting down
+ @param clazz the class of the server
+ @param args arguments
+ @param LOG the target log object]]>
+ </doc>
+ </method>
+ <field name="COMMA" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ESCAPE_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[General string utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.StringUtils -->
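+  <!-- Usage sketch (illustrative; the printed forms noted in the comments assume
+       the default comma separator and backslash escape char): an escape, split
+       and unescape round-trip.
+
+         import java.util.Arrays;
+         import org.apache.hadoop.util.StringUtils;
+
+         public class SplitDemo {
+           public static void main(String[] args) {
+             String escaped = StringUtils.escapeString("a,b");  // escapes the comma
+             String joined = escaped + "," + "c";
+             // split honours the escape, so "a,b" stays a single element
+             String[] parts = StringUtils.split(joined);
+             System.out.println(Arrays.toString(parts));
+             System.out.println(StringUtils.unEscapeString(parts[0]));  // a,b
+           }
+         }
+  -->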
+ <!-- start interface org.apache.hadoop.util.Tool -->
+ <interface name="Tool" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Execute the command with the given arguments.
+
+ @param args command specific arguments.
+ @return exit code.
+ @throws Exception]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A tool interface that supports handling of generic command-line options.
+
+    <p><code>Tool</code> is the standard for any Map-Reduce tool/application.
+ The tool/application should delegate the handling of
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ standard command-line options</a> to {@link ToolRunner#run(Tool, String[])}
+ and only handle its custom arguments.</p>
+
+ <p>Here is how a typical <code>Tool</code> is implemented:</p>
+ <p><blockquote><pre>
+ public class MyApp extends Configured implements Tool {
+
+ public int run(String[] args) throws Exception {
+ // <code>Configuration</code> processed by <code>ToolRunner</code>
+ Configuration conf = getConf();
+
+ // Create a JobConf using the processed <code>conf</code>
+ JobConf job = new JobConf(conf, MyApp.class);
+
+ // Process custom command-line options
+ Path in = new Path(args[1]);
+ Path out = new Path(args[2]);
+
+ // Specify various job-specific parameters
+ job.setJobName("my-app");
+ job.setInputPath(in);
+ job.setOutputPath(out);
+ job.setMapperClass(MyApp.MyMapper.class);
+ job.setReducerClass(MyApp.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+            return 0;
+          }
+
+ public static void main(String[] args) throws Exception {
+ // Let <code>ToolRunner</code> handle generic command-line options
+            int res = ToolRunner.run(new Configuration(), new MyApp(), args);
+
+ System.exit(res);
+ }
+ }
+ </pre></blockquote></p>
+
+ @see GenericOptionsParser
+ @see ToolRunner]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Tool -->
+ <!-- start class org.apache.hadoop.util.ToolRunner -->
+ <class name="ToolRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ToolRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the given <code>Tool</code> by {@link Tool#run(String[])}, after
+ parsing with the given generic arguments. Uses the given
+ <code>Configuration</code>, or builds one if null.
+
+ Sets the <code>Tool</code>'s configuration with the possibly modified
+ version of the <code>conf</code>.
+
+ @param conf <code>Configuration</code> for the <code>Tool</code>.
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the <code>Tool</code> with its <code>Configuration</code>.
+
+ Equivalent to <code>run(tool.getConf(), tool, args)</code>.
+
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+        <![CDATA[Prints generic command-line arguments and usage information.
+
+ @param out stream to write usage information to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility to help run {@link Tool}s.
+
+ <p><code>ToolRunner</code> can be used to run classes implementing
+ <code>Tool</code> interface. It works in conjunction with
+ {@link GenericOptionsParser} to parse the
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+    generic Hadoop command-line arguments</a> and modifies the
+ <code>Configuration</code> of the <code>Tool</code>. The
+ application-specific options are passed along without being modified.
+ </p>
+
+ @see Tool
+ @see GenericOptionsParser]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ToolRunner -->
+ <!-- start class org.apache.hadoop.util.VersionInfo -->
+ <class name="VersionInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Hadoop version.
+    @return the Hadoop version string, e.g. "0.6.3-dev"]]>
+ </doc>
+ </method>
+ <method name="getRevision" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Get the subversion revision number for the root directory.
+    @return the revision number, e.g. "451451"]]>
+ </doc>
+ </method>
+ <method name="getDate" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The date that Hadoop was compiled.
+    @return the compilation date in Unix date format]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The user that compiled Hadoop.
+ @return the username of the user]]>
+ </doc>
+ </method>
+ <method name="getUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the subversion URL for the root Hadoop directory.]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the buildVersion which includes version,
+ revision, user and date.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[This class finds the package info for Hadoop and the HadoopVersionAnnotation
+ information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.VersionInfo -->
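+  <!-- Usage sketch (illustrative; VersionDemo is hypothetical): printing the
+       build information exposed by the static getters.
+
+         import org.apache.hadoop.util.VersionInfo;
+
+         public class VersionDemo {
+           public static void main(String[] args) {
+             System.out.println("Hadoop " + VersionInfo.getVersion()
+                 + " (r" + VersionInfo.getRevision() + ")"
+                 + " built by " + VersionInfo.getUser()
+                 + " on " + VersionInfo.getDate());
+           }
+         }
+  -->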
+ <!-- start class org.apache.hadoop.util.XMLUtils -->
+ <class name="XMLUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="XMLUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="transform"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="styleSheet" type="java.io.InputStream"/>
+ <param name="xml" type="java.io.InputStream"/>
+ <param name="out" type="java.io.Writer"/>
+ <exception name="TransformerConfigurationException" type="javax.xml.transform.TransformerConfigurationException"/>
+ <exception name="TransformerException" type="javax.xml.transform.TransformerException"/>
+ <doc>
+ <![CDATA[Transform input xml given a stylesheet.
+
+ @param styleSheet the style-sheet
+ @param xml input xml data
+ @param out output
+ @throws TransformerConfigurationException
+ @throws TransformerException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[General xml utilities.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.XMLUtils -->
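+  <!-- Usage sketch (illustrative; style.xsl and in.xml are placeholder files):
+       applying an XSL stylesheet to an XML stream with XMLUtils.transform.
+
+         import java.io.FileInputStream;
+         import java.io.InputStream;
+         import java.io.OutputStreamWriter;
+         import java.io.Writer;
+         import org.apache.hadoop.util.XMLUtils;
+
+         public class TransformDemo {
+           public static void main(String[] args) throws Exception {
+             InputStream style = new FileInputStream("style.xsl");
+             InputStream xml = new FileInputStream("in.xml");
+             Writer out = new OutputStreamWriter(System.out);
+             XMLUtils.transform(style, xml, out);
+             out.flush();
+           }
+         }
+  -->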
+</package>
+
+</api>
diff --git a/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.2.xml b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.2.xml
new file mode 100644
index 0000000000..08173ab82d
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.2.xml
@@ -0,0 +1,38788 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Nov 04 18:17:16 UTC 2008 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="hadoop 0.18.2"
+ jdversion="1.1.1">
+
+<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/ndaley/tools/jdiff/latest/jdiff.jar:/home/ndaley/tools/jdiff/latest/xerces.jar -classpath /home/ndaley/hadoop/branch-0.18/build/classes:/home/ndaley/hadoop/branch-0.18/lib/commons-cli-2.0-SNAPSHOT.jar:/home/ndaley/hadoop/branch-0.18/lib/commons-codec-1.3.jar:/home/ndaley/hadoop/branch-0.18/lib/commons-httpclient-3.0.1.jar:/home/ndaley/hadoop/branch-0.18/lib/commons-logging-1.0.4.jar:/home/ndaley/hadoop/branch-0.18/lib/commons-logging-api-1.0.4.jar:/home/ndaley/hadoop/branch-0.18/lib/commons-net-1.4.1.jar:/home/ndaley/hadoop/branch-0.18/lib/jets3t-0.6.0.jar:/home/ndaley/hadoop/branch-0.18/lib/jetty-5.1.4.jar:/home/ndaley/hadoop/branch-0.18/lib/jetty-ext/commons-el.jar:/home/ndaley/hadoop/branch-0.18/lib/jetty-ext/jasper-compiler.jar:/home/ndaley/hadoop/branch-0.18/lib/jetty-ext/jasper-runtime.jar:/home/ndaley/hadoop/branch-0.18/lib/jetty-ext/jsp-api.jar:/home/ndaley/hadoop/branch-0.18/lib/junit-3.8.1.jar:/home/ndaley/hadoop/branch-0.18/lib/kfs-0.1.3.jar:/home/ndaley/hadoop/branch-0.18/lib/log4j-1.2.15.jar:/home/ndaley/hadoop/branch-0.18/lib/oro-2.0.8.jar:/home/ndaley/hadoop/branch-0.18/lib/servlet-api.jar:/home/ndaley/hadoop/branch-0.18/lib/slf4j-api-1.4.3.jar:/home/ndaley/hadoop/branch-0.18/lib/slf4j-log4j12-1.4.3.jar:/home/ndaley/hadoop/branch-0.18/lib/xmlenc-0.52.jar:/home/ndaley/hadoop/branch-0.18/conf:/home/ndaley/tools/ant/latest/lib/ant-launcher.jar:/home/ndaley/tools/ant/latest/lib/ant-antlr.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-bcel.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-bsf.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-log4j.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-oro.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-regexp.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-resolver.jar:/home/ndaley/tools/ant/latest/lib/ant-commons-logging.jar:/home/ndaley/tools/ant/latest/lib/ant-commons-net.jar:/home/ndaley/tools/ant/latest/lib/ant-jai.jar:/home/ndaley/tools/ant/latest/lib/ant-javamail.jar:/home/ndaley/tools/ant/latest/lib/ant-jdepend.jar:/home/ndaley/tools/ant/latest/lib/ant-jmf.jar:/home/ndaley/tools/ant/latest/lib/ant-jsch.jar:/home/ndaley/tools/ant/latest/lib/ant-junit.jar:/home/ndaley/tools/ant/latest/lib/ant-netrexx.jar:/home/ndaley/tools/ant/latest/lib/ant-nodeps.jar:/home/ndaley/tools/ant/latest/lib/ant-starteam.jar:/home/ndaley/tools/ant/latest/lib/ant-stylebook.jar:/home/ndaley/tools/ant/latest/lib/ant-swing.jar:/home/ndaley/tools/ant/latest/lib/ant-testutil.jar:/home/ndaley/tools/ant/latest/lib/ant-trax.jar:/home/ndaley/tools/ant/latest/lib/ant-weblogic.jar:/home/ndaley/tools/ant/latest/lib/ant.jar:/home/ndaley/tools/ant/latest/lib/xercesImpl.jar:/home/ndaley/tools/ant/latest/lib/xml-apis.jar:/home/hadoopqa/tools/java/jdk1.5.0_11-32bit/lib/tools.jar -sourcepath /home/ndaley/hadoop/branch-0.18/src/core:/home/ndaley/hadoop/branch-0.18/src/mapred:/home/ndaley/hadoop/branch-0.18/src/tools -apidir /home/ndaley/hadoop/branch-0.18/docs/jdiff -apiname hadoop 0.18.2 -->
+<package name="org.apache.hadoop">
+ <!-- start class org.apache.hadoop.HadoopVersionAnnotation -->
+ <class name="HadoopVersionAnnotation" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.annotation.Annotation"/>
+ <doc>
+ <![CDATA[A package attribute that captures the version of Hadoop that was compiled.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.HadoopVersionAnnotation -->
+</package>
+<package name="org.apache.hadoop.conf">
+ <!-- start interface org.apache.hadoop.conf.Configurable -->
+ <interface name="Configurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration to be used by this object.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the configuration used by this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.conf.Configurable -->
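+  <!-- Usage sketch (illustrative; MyConfigurable is hypothetical): the smallest
+       possible Configurable; most real code extends
+       org.apache.hadoop.conf.Configured instead of implementing this directly.
+
+         import org.apache.hadoop.conf.Configurable;
+         import org.apache.hadoop.conf.Configuration;
+
+         public class MyConfigurable implements Configurable {
+           private Configuration conf;
+           public void setConf(Configuration conf) { this.conf = conf; }
+           public Configuration getConf() { return conf; }
+         }
+  -->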
+ <!-- start class org.apache.hadoop.conf.Configuration -->
+ <class name="Configuration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"/>
+ <constructor name="Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration with the same settings cloned from another.
+
+ @param other the configuration from which to clone settings.]]>
+ </doc>
+ </constructor>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param name resource to be added, the classpath is examined for a file
+ with that name.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="url" type="java.net.URL"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param url url of the resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param file file-path of resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, <code>null</code> if
+ no such property exists.
+
+ Values are processed for <a href="#VariableExpansion">variable expansion</a>
+ before being returned.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="getRaw" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, without doing
+ <a href="#VariableExpansion">variable expansion</a>.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the <code>value</code> of the <code>name</code> property.
+
+ @param name property name.
+ @param value property value.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property. If no such property
+ exists, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value, or <code>defaultValue</code> if the property
+ doesn't exist.]]>
+ </doc>
+ </method>
+ <method name="getInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="int"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>.
+
+ If no such property exists, or if the specified value is not a valid
+ <code>int</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as an <code>int</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>.
+
+ @param name property name.
+ @param value <code>int</code> value of the property.]]>
+ </doc>
+ </method>
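+    <!-- Usage sketch (illustrative; the property names are made up): the basic
+         typed accessors documented above.
+
+           import org.apache.hadoop.conf.Configuration;
+
+           public class ConfDemo {
+             public static void main(String[] args) {
+               Configuration conf = new Configuration();
+               conf.set("my.prop", "hello");
+               conf.setInt("my.int", 7);
+               System.out.println(conf.get("my.prop"));          // hello
+               System.out.println(conf.getInt("my.int", 42));    // 7
+               System.out.println(conf.get("missing", "deflt")); // falls back to default
+             }
+           }
+    -->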
+ <method name="getLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="long"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>long</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>long</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>.
+
+ @param name property name.
+ @param value <code>long</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="float"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>float</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>float</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getBoolean" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="boolean"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>boolean</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>boolean</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setBoolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>.
+
+ @param name property name.
+ @param value <code>boolean</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Parse the given attribute as a set of integer ranges
+ @param name the attribute name
+ @param defaultValue the default value if it is not set
+ @return a new set of ranges from the configured value]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ a collection of <code>String</code>s.
+    If no such property is specified then an empty collection is returned.
+ <p>
+ This is an optimized version of {@link #getStrings(String)}
+
+ @param name property name.
+ @return property value as a collection of <code>String</code>s.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then <code>null</code> is returned.
+
+ @param name property name.
+ @return property value as an array of <code>String</code>s,
+ or <code>null</code>.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+    If no such property is specified then the default value is returned.
+
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of <code>String</code>s,
+ or default value.]]>
+ </doc>
+ </method>
+ <method name="setStrings"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="values" type="java.lang.String[]"/>
+ <doc>
+        <![CDATA[Set the array of string values for the <code>name</code> property
+    as comma delimited values.
+
+ @param name property name.
+ @param values The values]]>
+ </doc>
+ </method>
+ <method name="getClassByName" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Load a class by name.
+
+ @param name the class name.
+ @return the class object.
+ @throws ClassNotFoundException if the class is not found.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;? extends U&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends U&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;U&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>
+ implementing the interface specified by <code>xface</code>.
+
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ An exception is thrown if the returned class does not implement the named
+ interface.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @param xface the interface implemented by the named class.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to the name of
+ <code>theClass</code>, which must implement the given interface <code>xface</code>.
+
+ An exception is thrown if <code>theClass</code> does not implement the
+ interface <code>xface</code>.
+
+ @param name property name.
+ @param theClass property value.
+ @param xface the interface implemented by the named class.]]>
+ </doc>
+ </method>
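+ <!-- Illustrative sketch: storing a class name in the configuration and
+ resolving it against an expected interface. Tool is a real Hadoop interface;
+ MyTool, FallbackTool and the property name "my.app.tool.class" are hypothetical.
+
+ Configuration conf = new Configuration();
+ // setClass verifies MyTool implements Tool, then stores the class name.
+ conf.setClass("my.app.tool.class", MyTool.class, Tool.class);
+ // getClass loads the named class and checks it against Tool; an exception
+ // is thrown if the configured class does not implement the interface.
+ Class<? extends Tool> cls =
+     conf.getClass("my.app.tool.class", FallbackTool.class, Tool.class);
+ -->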
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp the name of the property whose value lists the directories to search.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp the name of the property whose value lists the directories to search.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
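+ <!-- Illustrative sketch: spreading local files across several directories.
+ The property name "my.app.local.dirs" and its value are hypothetical.
+
+ Configuration conf = new Configuration();
+ conf.set("my.app.local.dirs", "/disk1/tmp,/disk2/tmp");
+ // One of the two directories is chosen by hashing the relative path,
+ // and the chosen directory is created if it does not already exist.
+ Path spill = conf.getLocalPath("my.app.local.dirs", "spill0.out");
+ -->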
+ <method name="getResource" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the {@link URL} for the named resource.
+
+ @param name resource name.
+ @return the url for the named resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get an input stream attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return an input stream attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsReader" return="java.io.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a {@link Reader} attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return a reader attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code>
+ key-value pairs in the configuration.
+
+ @return an iterator over the entries.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write out the non-default properties in this configuration to the given
+ {@link OutputStream}.
+
+ @param out the output stream to write to.]]>
+ </doc>
+ </method>
+ <method name="getClassLoader" return="java.lang.ClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link ClassLoader} for this job.
+
+ @return the correct class loader.]]>
+ </doc>
+ </method>
+ <method name="setClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="classLoader" type="java.lang.ClassLoader"/>
+ <doc>
+ <![CDATA[Set the class loader that will be used to load the various objects.
+
+ @param classLoader the new class loader.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setQuietMode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="quietmode" type="boolean"/>
+ <doc>
+ <![CDATA[Set the quietness-mode.
+
+ In quiet-mode, error and informational messages might not be logged.
+
+ @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
+ to turn it off.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[For debugging. List non-default properties to the terminal and exit.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides access to configuration parameters.
+
+ <h4 id="Resources">Resources</h4>
+
+ <p>Configurations are specified by resources. A resource contains a set of
+ name/value pairs as XML data. Each resource is named by either a
+ <code>String</code> or by a {@link Path}. If named by a <code>String</code>,
+ then the classpath is examined for a file with that name. If named by a
+ <code>Path</code>, then the local filesystem is examined directly, without
+ referring to the classpath.
+
+ <p>Hadoop by default specifies two resources, loaded in-order from the
+ classpath: <ol>
+ <li><tt><a href="{@docRoot}/../hadoop-default.html">hadoop-default.xml</a>
+ </tt>: Read-only defaults for hadoop.</li>
+ <li><tt>hadoop-site.xml</tt>: Site-specific configuration for a given hadoop
+ installation.</li>
+ </ol>
+ Applications may add additional resources, which are loaded
+ subsequent to these resources in the order they are added.
+
+ <h4 id="FinalParams">Final Parameters</h4>
+
+ <p>Configuration parameters may be declared <i>final</i>.
+ Once a resource declares a value final, no subsequently-loaded
+ resource can alter that value.
+ For example, one might define a final parameter with:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;dfs.client.buffer.dir&lt;/name&gt;
+ &lt;value&gt;/tmp/hadoop/dfs/client&lt;/value&gt;
+ <b>&lt;final&gt;true&lt;/final&gt;</b>
+ &lt;/property&gt;</pre></tt>
+
+ Administrators typically define parameters as final in
+ <tt>hadoop-site.xml</tt> for values that user applications may not alter.
+
+ <h4 id="VariableExpansion">Variable Expansion</h4>
+
+ <p>Value strings are first processed for <i>variable expansion</i>. The
+ available properties are:<ol>
+ <li>Other properties defined in this Configuration; and, if a name is
+ undefined here,</li>
+ <li>Properties in {@link System#getProperties()}.</li>
+ </ol>
+
+ <p>For example, if a configuration resource contains the following property
+ definitions:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;basedir&lt;/name&gt;
+ &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
+ &lt;/property&gt;
+
+ &lt;property&gt;
+ &lt;name&gt;tempdir&lt;/name&gt;
+ &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt;
+ &lt;/property&gt;</pre></tt>
+
+ When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
+ will be resolved to another property in this Configuration, while
+ <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
+ of the System property with that name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration -->
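+ <!-- Illustrative sketch of variable expansion, mirroring the basedir/tempdir
+ example in the class documentation above.
+
+ Configuration conf = new Configuration(); // loads hadoop-default.xml, hadoop-site.xml
+ String tmp = conf.get("tempdir");
+ // ${basedir} is resolved against this Configuration first; ${user.name}
+ // is not defined here, so it falls through to System.getProperty("user.name").
+ // For user "alice" this yields "/user/alice/tmp".
+ -->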
+ <!-- start class org.apache.hadoop.conf.Configuration.IntegerRanges -->
+ <class name="Configuration.IntegerRanges" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Configuration.IntegerRanges"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Configuration.IntegerRanges" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isIncluded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Is the given value in the set of ranges?
+ @param value the value to check
+ @return whether the value is in the ranges]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A class that represents a set of positive integer ranges. It parses
+ strings of the form "2-3,5,7-", where ranges are separated by commas and
+ the lower/upper bounds of a range are separated by a dash. Either the lower
+ or the upper bound may be omitted, meaning all values up to or above the
+ remaining bound. So the string above means 2, 3, 5, and 7, 8, 9, ...]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration.IntegerRanges -->
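+ <!-- Illustrative sketch of the range format described above.
+
+ Configuration.IntegerRanges ranges = new Configuration.IntegerRanges("2-3,5,7-");
+ ranges.isIncluded(3);   // true:  inside 2-3
+ ranges.isIncluded(4);   // false: between ranges
+ ranges.isIncluded(42);  // true:  "7-" is open-ended upward
+ -->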
+ <!-- start class org.apache.hadoop.conf.Configured -->
+ <class name="Configured" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Configured"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configured" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configured -->
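+ <!-- Illustrative sketch: extending Configured to satisfy the Configurable
+ contract. MyService and the property "my.service.port" are hypothetical.
+
+ public class MyService extends Configured {
+   public MyService(Configuration conf) {
+     super(conf); // stores conf; getConf() returns it later
+   }
+   public int port() {
+     return getConf().getInt("my.service.port", 8020);
+   }
+ }
+ -->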
+</package>
+<package name="org.apache.hadoop.filecache">
+ <!-- start class org.apache.hadoop.filecache.DistributedCache -->
+ <class name="DistributedCache" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedCache"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized; this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache directory where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred is
+ returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to the directory where the archive is unpacked, in case of archives;
+ the path to which the file is copied locally, in case of files
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized; this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache directory where you want to localize the files/archives
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred
+ is returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to the directory where the archive is unpacked, in case of archives;
+ the path to which the file is copied locally, in case of files
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="releaseCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is the opposite of getLocalCache. When you are done
+ using the cache, you need to release it
+ @param cache The cache URI to be released
+ @param conf configuration which contains the filesystem the cache
+ is contained in.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeRelative" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTimestamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="cache" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns mtime of a given cache file on hdfs.
+ @param conf configuration
+ @param cache cache file
+ @return mtime of a given cache file on hdfs
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createAllSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="jobCacheDir" type="java.io.File"/>
+ <param name="workDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method creates, in one directory, symlinks for all files in another directory
+ @param conf the configuration
+ @param jobCacheDir the directory containing the files to symlink (the link targets)
+ @param workDir the directory in which the symlinks are created
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setCacheArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archives" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of archives
+ @param archives The list of archives that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="setCacheFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of files
+ @param files The list of files that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="getCacheArchives" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache archives set in the Configuration
+ @param conf The configuration which contains the archives
+ @return A URI array of the caches set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCacheFiles" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache files set in the Configuration
+ @param conf The configuration which contains the files
+ @return A URI array of the files set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheArchives" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized caches
+ @param conf Configuration that contains the localized archives
+ @return A path array of localized caches
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized files
+ @param conf Configuration that contains the localized files
+ @return A path array of localized files
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getArchiveTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the archives
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFileTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the files
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setArchiveTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This records the timestamps of the archives to be localized, so they can be checked
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of archives.
+ The order should be the same as the order in which the archives are added.]]>
+ </doc>
+ </method>
+ <method name="setFileTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This records the timestamps of the files to be localized, so they can be checked
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of files.
+ The order should be the same as the order in which the files are added.]]>
+ </doc>
+ </method>
+ <method name="setLocalArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized archives
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+ </doc>
+ </method>
+ <method name="setLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized files
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+ </doc>
+ </method>
+ <method name="addCacheArchive"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add an archive to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addCacheFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add a file to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addFileToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a file path to the current set of classpath entries. It adds the file
+ to the cache as well.
+
+ @param file Path of the file to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getFileClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the file entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="addArchiveToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archive" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an archive path to the current set of classpath entries. It adds the
+ archive to cache as well.
+
+ @param archive Path of the archive to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getArchiveClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the archive entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method allows you to create symlinks in the current working directory
+ of the task to all the cache files/archives
+ @param conf the jobconf]]>
+ </doc>
+ </method>
+ <method name="getSymlink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method checks to see if symlinks are to be created for the
+ localized cache files in the current working directory
+ @param conf the jobconf
+ @return true if symlinks are to be created; false otherwise]]>
+ </doc>
+ </method>
+ <method name="checkURIs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uriFiles" type="java.net.URI[]"/>
+ <param name="uriArchives" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[This method checks if there is a conflict among the fragment names
+ of the uris, and makes sure that each uri has a fragment. It
+ is only to be called if you want to create symlinks for
+ the various archives and files.
+ @param uriFiles The array of file uris
+ @param uriArchives The array of archive uris]]>
+ </doc>
+ </method>
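+ <!-- Illustrative sketch: requesting symlinks for cached files. The URI
+ fragment becomes the symlink name in the task's working directory;
+ checkURIs validates the fragments up front. The paths are hypothetical.
+
+ URI file = new URI("hdfs://namenode:8020/myapp/lookup.dat#lookup.dat");
+ if (DistributedCache.checkURIs(new URI[] { file }, new URI[0])) {
+   DistributedCache.addCacheFile(file, conf);
+   DistributedCache.createSymlink(conf); // tasks can then open ./lookup.dat
+ }
+ -->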
+ <method name="purgeCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clear the entire contents of the cache and delete the backing files. This
+ should only be used when the server is reinitializing, because the users
+ are going to lose their files.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Distribute application-specific large, read-only files efficiently.
+
+ <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ framework to cache files (text, archives, jars etc.) needed by applications.
+ </p>
+
+ <p>Applications specify the files to be cached via urls (hdfs:// or http://)
+ in the {@link JobConf}. The <code>DistributedCache</code> assumes that the
+ files specified via hdfs:// urls are already present on the
+ {@link FileSystem} at the path specified by the url.</p>
+
+ <p>The framework will copy the necessary files on to the slave node before
+ any tasks for the job are executed on that node. Its efficiency stems from
+ the fact that the files are only copied once per job and the ability to
+ cache archives which are un-archived on the slaves.</p>
+
+ <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ data/text files and/or more complex types such as archives, jars etc.
+ Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes.
+ Jars may be optionally added to the classpath of the tasks, a rudimentary
+ software distribution mechanism. Files have execution permissions.
+ Optionally users can also direct it to symlink the distributed cache file(s)
+ into the working directory of the task.</p>
+
+ <p><code>DistributedCache</code> tracks modification timestamps of the cache
+ files. Clearly the cache files should not be modified by the application
+ or externally while the job is executing.</p>
+
+ <p>Here is an illustrative example on how to use the
+ <code>DistributedCache</code>:</p>
+ <p><blockquote><pre>
+ // Setting up the cache for the application
+
+ 1. Copy the requisite files to the <code>FileSystem</code>:
+
+ $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
+ $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
+ $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+ $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
+ $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
+ $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
+
+ 2. Setup the application's <code>JobConf</code>:
+
+ JobConf job = new JobConf();
+ DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
+ job);
+ DistributedCache.addCacheArchive(new URI("/myapp/map.zip", job);
+ DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar", job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz", job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz", job);
+
+ 3. Use the cached files in the {@link Mapper} or {@link Reducer}:
+
+ public static class MapClass extends MapReduceBase
+ implements Mapper&lt;K, V, K, V&gt; {
+
+ private Path[] localArchives;
+ private Path[] localFiles;
+
+ public void configure(JobConf job) {
+ // Get the cached archives/files
+ localArchives = DistributedCache.getLocalCacheArchives(job);
+ localFiles = DistributedCache.getLocalCacheFiles(job);
+ }
+
+ public void map(K key, V value,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Use data from the cached archives/files here
+ // ...
+ // ...
+ output.collect(k, v);
+ }
+ }
+
+ </pre></blockquote></p>
+
+ @see JobConf
+ @see JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.filecache.DistributedCache -->
+</package>
+<package name="org.apache.hadoop.fs">
+ <!-- start class org.apache.hadoop.fs.BlockLocation -->
+ <class name="BlockLocation" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlockLocation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with names, hosts, offset and length]]>
+ </doc>
+ </constructor>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hosts (hostname) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of names (hostname:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start offset of this block within the associated file]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the length of the block]]>
+ </doc>
+ </method>
+ <method name="setOffset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <doc>
+ <![CDATA[Set the start offset of this block within the associated file]]>
+ </doc>
+ </method>
+ <method name="setLength"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="length" type="long"/>
+ <doc>
+ <![CDATA[Set the length of the block]]>
+ </doc>
+ </method>
+ <method name="setHosts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hosts" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the hosts hosting this block]]>
+ </doc>
+ </method>
+ <method name="setNames"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the names (host:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement write of Writable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement readFields of Writable]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BlockLocation -->
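+ <!-- Illustrative sketch: inspecting block placement for a file. The path is
+ hypothetical; FileSystem.getFileBlockLocations is the usual producer of
+ BlockLocation instances.
+
+ FileSystem fs = FileSystem.get(conf);
+ FileStatus stat = fs.getFileStatus(new Path("/data/big.seq"));
+ BlockLocation[] blocks = fs.getFileBlockLocations(stat, 0, stat.getLen());
+ for (BlockLocation b : blocks) {
+   // offset/length locate the block within the file; hosts name the
+   // datanodes holding replicas of it.
+   System.out.println(b.getOffset() + "+" + b.getLength()
+       + " on " + java.util.Arrays.toString(b.getHosts()));
+ }
+ -->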
+ <!-- start class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <class name="BufferedFSInputStream" extends="java.io.BufferedInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="BufferedFSInputStream" type="org.apache.hadoop.fs.FSInputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a <code>BufferedFSInputStream</code>
+ with the specified buffer size,
+ and saves its argument, the input stream
+ <code>in</code>, for later use. An internal
+ buffer array of length <code>size</code>
+ is created and stored in <code>buf</code>.
+
+ @param in the underlying input stream.
+ @param size the buffer size.
+ @exception IllegalArgumentException if size <= 0.]]>
+ </doc>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A class that optimizes reading from FSInputStream by buffering]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BufferedFSInputStream -->
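+ <!-- Illustrative sketch of the Seekable/PositionedReadable contract this
+ class implements, shown through FSDataInputStream, which exposes the same
+ interfaces. The path is hypothetical.
+
+ FSDataInputStream in = fs.open(new Path("/data/log.bin"));
+ byte[] buf = new byte[4096];
+ // Positioned read: fill buf from byte offset 1048576 of the file without
+ // disturbing the stream's current position.
+ in.readFully(1048576L, buf, 0, buf.length);
+ in.seek(0); // reposition explicitly for subsequent sequential reads
+ -->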
+ <!-- start class org.apache.hadoop.fs.ChecksumException -->
+ <class name="ChecksumException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumException" type="java.lang.String, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Thrown for checksum errors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumException -->
+ <!-- start class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <class name="ChecksumFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getApproxChkSumLength" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getRawFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the raw file system.]]>
+ </doc>
+ </method>
+ <method name="getChecksumFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return the name of the checksum file associated with a file.]]>
+ </doc>
+ </method>
+ <method name="isChecksumFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return true iff file is a checksum file name.]]>
+ </doc>
+ </method>
+ <method name="getChecksumFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileSize" type="long"/>
+ <doc>
+ <![CDATA[Return the length of the checksum file given the size of the
+ actual file.]]>
+ </doc>
+ </method>
+ <method name="getBytesPerSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the bytes per checksum.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getChecksumLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ <param name="bytesPerSum" type="int"/>
+ <doc>
+ <![CDATA[Calculates the length of the checksum file in bytes.
+ @param size the length of the data file in bytes
+ @param bytesPerSum the number of bytes in a checksum block
+ @return the number of bytes in the checksum file]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename files/dirs]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement the delete(Path, boolean) in checksum
+ file system.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under this filesystem, and the dst is on the local disk.
+ Copy it from the filesystem to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="copyCrc" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under this filesystem, and the dst is on the local disk.
+ Copy it from the filesystem to the local dst name.
+ If src and dst are directories, the copyCrc parameter
+ determines whether to copy CRC files.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Report a checksum error to the file system.
+ @param f the file name containing the error
+ @param in the stream open on the file
+ @param inPos the position of the beginning of the bad data in the file
+ @param sums the stream open on the checksum file
+ @param sumsPos the position of the beginning of the bad data in the checksum file
+ @return whether a retry is necessary]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract checksummed FileSystem.
+ It provides a basic implementation of a checksummed FileSystem,
+ which creates a checksum file for each raw file.
+ It generates & verifies checksums at the client side.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumFileSystem -->
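+ <!-- Illustrative sketch: LocalFileSystem is a concrete ChecksumFileSystem,
+ so each data file is shadowed by a checksum file (conventionally
+ ".name.crc" in the same directory). The paths are hypothetical.
+
+ LocalFileSystem lfs = FileSystem.getLocal(conf);
+ Path data = new Path("/tmp/data.bin");
+ Path crc = lfs.getChecksumFile(data);                   // e.g. /tmp/.data.bin.crc
+ boolean isCrc = ChecksumFileSystem.isChecksumFile(crc); // true
+ FileSystem raw = lfs.getRawFileSystem();                // bypasses checksumming
+ -->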
+ <!-- start class org.apache.hadoop.fs.ContentSummary -->
+ <class name="ContentSummary" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ContentSummary"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the length]]>
+ </doc>
+ </method>
+ <method name="getDirectoryCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the directory count]]>
+ </doc>
+ </method>
+ <method name="getFileCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the file count]]>
+ </doc>
+ </method>
+ <method name="getQuota" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the directory quota]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the header of the output.
+ if qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the header of the output]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the string representation of the object in the output format.
+ if qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the string representation of the object]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store the summary of a content (a directory or a file).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ContentSummary -->
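+ <!-- Illustrative sketch: summarizing a directory tree. The path is hypothetical.
+
+ FileSystem fs = FileSystem.get(conf);
+ ContentSummary cs = fs.getContentSummary(new Path("/user/alice"));
+ long bytes = cs.getLength();         // total bytes under the path
+ long files = cs.getFileCount();
+ long dirs  = cs.getDirectoryCount();
+ long quota = cs.getQuota();          // assumed to be -1 when no quota is set
+ -->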
+ <!-- start class org.apache.hadoop.fs.DF -->
+ <class name="DF" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DF" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="DF" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFilesystem" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAvailable" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPercentUsed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMount" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DF_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'df' program.
+ Tested on Linux, FreeBSD, Cygwin.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DF -->
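+ <!-- Usage sketch (illustrative Java): sampling local disk statistics with DF.
+ The directory is hypothetical; the interval argument presumably throttles how
+ often 'df' is re-run.
+
+ import java.io.File;
+ import org.apache.hadoop.fs.DF;
+
+ DF df = new DF(new File("/tmp"), DF.DF_INTERVAL_DEFAULT); // interval in ms
+ System.out.println(df.getFilesystem() + " mounted at " + df.getMount());
+ System.out.println(df.getAvailable() + " bytes free, " + df.getPercentUsed() + "% used");
+ -->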
+ <!-- start class org.apache.hadoop.fs.DU -->
+ <class name="DU" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DU" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param interval refresh the disk usage at this interval
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <constructor name="DU" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param conf configuration object
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <method name="decDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Decrease how much disk space we use.
+ @param value decrease by this value]]>
+ </doc>
+ </method>
+ <method name="incDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Increase how much disk space we use.
+ @param value increase by this value]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return disk space used
+ @throws IOException if the shell command fails]]>
+ </doc>
+ </method>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the path whose disk usage is being tracked]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Start the disk usage checking thread.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down the refreshing thread.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'du' program.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DU -->
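+ <!-- Usage sketch (illustrative Java): tracking disk usage under a path with DU.
+ The path and interval are hypothetical.
+
+ import java.io.File;
+ import org.apache.hadoop.fs.DU;
+
+ DU du = new DU(new File("/tmp/data"), 600000L); // refresh every 10 minutes
+ du.start(); // background thread keeps the figure current
+ long used = du.getUsed(); // bytes used under /tmp/data
+ du.shutdown(); // stop the refresh thread
+ -->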
+ <!-- start class org.apache.hadoop.fs.FileStatus -->
+ <class name="FileStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <constructor name="FileStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a directory?
+ @return true if this is a directory]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the block size of the file.
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replication factor of a file.
+ @return the replication factor of a file.]]>
+ </doc>
+ </method>
+ <method name="getModificationTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the modification time of the file.
+ @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get FsPermission associated with the file.
+ @return permission. If a filesystem does not have a notion of permissions,
+ or if permissions could not be determined, then the default
+ permission equivalent of "rwxrwxrwx" is returned.]]>
+ </doc>
+ </method>
+ <method name="getOwner" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the owner of the file.
+ @return owner of the file. The string could be empty if there is no
+ notion of owner of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getGroup" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the group associated with the file.
+ @return group for the file. The string could be empty if there is no
+ notion of group of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Sets permission.
+ @param permission if permission is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="owner" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets owner.
+ @param owner if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setGroup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets group.
+ @param group if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare this object to another object
+
+ @param o the object to be compared.
+ @return a negative integer, zero, or a positive integer as this object
+ is less than, equal to, or greater than the specified object.
+
+ @throws ClassCastException if the specified object is not of
+ type FileStatus]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Check if this object is equal to another object
+ @param o the object to be compared.
+ @return true if the two file statuses have the same path name; false if not.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for the object, which is defined as
+ the hash code of the path name.
+
+ @return a hash code value for the path name.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents the client-side information for a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileStatus -->
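+ <!-- Usage sketch (illustrative Java): inspecting a FileStatus returned by
+ FileSystem#getFileStatus (documented below); 'fs' and the path are hypothetical.
+
+ FileStatus st = fs.getFileStatus(new Path("/user/data/part-00000"));
+ if (!st.isDir()) {
+ System.out.println(st.getPath() + ": " + st.getLen() + " bytes, replication "
+ + st.getReplication() + ", owner " + st.getOwner());
+ }
+ -->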
+ <!-- start class org.apache.hadoop.fs.FileSystem -->
+ <class name="FileSystem" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="FileSystem"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseArgs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="i" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the cmd-line args, starting at i. Remove consumed args
+ from the array. We expect a param in the form:
+ '-local | -dfs <namenode:port>']]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the configured filesystem implementation.]]>
+ </doc>
+ </method>
+ <method name="getDefaultUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default filesystem URI from a configuration.
+ @param conf the configuration to access
+ @return the uri of the default filesystem]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.net.URI"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="getNamed" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="call #get(URI,Configuration) instead.">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated call #get(URI,Configuration) instead.]]>
+ </doc>
+ </method>
+ <method name="getLocal" return="org.apache.hadoop.fs.LocalFileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the local file system.
+ @param conf the configuration to configure the file system with
+ @return a LocalFileSystem]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the FileSystem for this URI's scheme and authority. The scheme
+ of the URI determines a configuration property name,
+ <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
+ The entire URI is passed to the FileSystem instance's initialize method.]]>
+ </doc>
+ </method>
+ <method name="closeAll"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all cached filesystems. Be sure those filesystems are not
+ used anymore.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a file with the provided permission.
+ The permission of the file is set to the provided permission, as in
+ setPermission, not permission&~umask.
+
+ It is implemented using two RPCs. It is understood that it is inefficient,
+ but the implementation is thread-safe. The other option is to change the
+ value of umask in configuration to be 0, but it is not thread-safe.
+
+ @param fs file system handle
+ @param file the name of the file to be created
+ @param permission the permission of the file
+ @return an output stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a directory with the provided permission.
+ The permission of the directory is set to the provided permission, as in
+ setPermission, not permission&~umask.
+
+ @see #create(FileSystem, Path, FsPermission)
+
+ @param fs file system handle
+ @param dir the name of the directory to be created
+ @param permission the permission of the directory
+ @return true if the directory creation succeeds; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
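+ <!-- Usage sketch (illustrative Java): the two static helpers above apply the
+ exact FsPermission rather than permission & ~umask. The octal mode is an
+ example value.
+
+ import org.apache.hadoop.fs.permission.FsPermission;
+
+ FsPermission perm = new FsPermission((short) 0755);
+ FileSystem.mkdirs(fs, new Path("/user/example"), perm);
+ FSDataOutputStream out = FileSystem.create(fs, new Path("/user/example/f"), perm);
+ out.close();
+ -->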
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getFileBlockLocations(FileStatus, long, long)}">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing hostnames, offset and size of
+ portions of the given file. For a nonexistent
+ file or region, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The FileSystem will simply return an element containing 'localhost'.
+ @deprecated use {@link #getFileBlockLocations(FileStatus, long, long)}]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing hostnames, offset and size of
+ portions of the given file. For a nonexistent
+ file or region, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The FileSystem will simply return an element containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file to open]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param permission the permission to apply to the file
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize the block size to use for the file
+ @param progress for reporting progress if it is not null
+ @throws IOException
+ @see #setPermission(Path, FsPermission)]]>
+ </doc>
+ </method>
+ <method name="createNewFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the given Path as a brand-new zero-length file. Returns
+ false if creation fails or if the file already exists.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, getConf().getInt("io.file.buffer.size", 4096), null)
+ @param f the existing file to be appended.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, bufferSize, null).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @param progress for reporting progress if it is not null.
+ @throws IOException]]>
+ </doc>
+ </method>
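+ <!-- Usage sketch (illustrative Java): append is an optional operation, so an
+ implementation may reject it with an IOException; the path is hypothetical.
+
+ FSDataOutputStream out = fs.append(new Path("/logs/app.log"));
+ out.write("another line\n".getBytes());
+ out.close();
+ -->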
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get replication.
+
+ @deprecated Use getFileStatus() instead
+ @param src file name
+ @return file replication
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file.
+
+ @param f the path to delete.
+ @param recursive if the path is a directory and recursive is set to
+ true, the directory is deleted; otherwise an exception is thrown. For
+ a file, recursive may be either true or false.
+ @return true if delete is successful else false.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="deleteOnExit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a path to be deleted when FileSystem is closed.
+ When the JVM shuts down, all FileSystem objects will be closed
+ automatically, and the marked path will then be deleted as a result of
+ closing the FileSystem.
+
+ The path has to exist in the file system.
+
+ @param f the path to delete.
+ @return true if deleteOnExit is successful, otherwise false.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="processDeleteOnExit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete all files that were marked as delete-on-exit. This recursively
+ deletes all files in the specified paths.]]>
+ </doc>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if a path exists.
+ @param f source file]]>
+ </doc>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[True iff the named path is a regular file.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the {@link ContentSummary} of a given {@link Path}.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given path using the user-supplied path
+ filter.
+
+ @param f
+ a path name
+ @param filter
+ the user-supplied path filter
+ @return an array of FileStatus objects for the files under the given path
+ after applying the filter
+ @throws IOException
+ if any problem is encountered while fetching the status]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using the default
+ path filter.
+
+ @param files
+ a list of paths
+ @return a list of statuses for the files under the given paths after
+ applying the default path filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using the user-supplied
+ path filter.
+
+ @param files
+ a list of paths
+ @param filter
+ the user-supplied path filter
+ @return a list of statuses for the files under the given paths after
+ applying the filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Return all the files that match pathPattern and are not checksum
+ files. Results are sorted by their names.
+
+ <p>
+ A filename pattern is composed of <i>regular</i> characters and
+ <i>special pattern matching</i> characters, which are:
+
+ <dl>
+ <dd>
+ <dl>
+ <p>
+ <dt> <tt> ? </tt>
+ <dd> Matches any single character.
+
+ <p>
+ <dt> <tt> * </tt>
+ <dd> Matches zero or more characters.
+
+ <p>
+ <dt> <tt> [<i>abc</i>] </tt>
+ <dd> Matches a single character from character set
+ <tt>{<i>a,b,c</i>}</tt>.
+
+ <p>
+ <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
+ <dd> Matches a single character from the character range
+ <tt>{<i>a...b</i>}</tt>. Note that character <tt><i>a</i></tt> must be
+ lexicographically less than or equal to character <tt><i>b</i></tt>.
+
+ <p>
+ <dt> <tt> [^<i>a</i>] </tt>
+ <dd> Matches a single character that is not from character set or range
+ <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur
+ immediately to the right of the opening bracket.
+
+ <p>
+ <dt> <tt> \<i>c</i> </tt>
+ <dd> Removes (escapes) any special meaning of character <i>c</i>.
+
+ <p>
+ <dt> <tt> {ab,cd} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt>
+
+ <p>
+ <dt> <tt> {ab,c{de,fh}} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt>
+
+ </dl>
+ </dd>
+ </dl>
+
+ @param pathPattern a regular expression specifying a path pattern
+
+ @return an array of paths that match the path pattern
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array of FileStatus objects whose path names match pathPattern
+ and are accepted by the user-supplied path filter. Results are sorted by
+ their path names.
+ Return null if pathPattern has no glob and the path does not exist.
+ Return an empty array if pathPattern has a glob and no path matches it.
+
+ @param pathPattern
+ a regular expression specifying the path pattern
+ @param filter
+ a user-supplied path filter
+ @return an array of FileStatus objects
+ @throws IOException if any I/O error occurs when fetching file status]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the current user's home directory in this filesystem.
+ The default implementation returns "/user/$USER/".]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param new_dir the new working directory]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system.
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #mkdirs(Path, FsPermission)} with default permission.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make the given file and all non-existent parents into
+ directories. Has the semantics of Unix 'mkdir -p'.
+ Existence of the directory hierarchy is not an error.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name and the source is kept intact afterwards]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add them to FS at
+ the given dst name, removing the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name, removing the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add them to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="moveToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ Remove the source afterwards]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[No more filesystem operations are needed. Will
+ release any held locks.]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total size of all files in the filesystem.]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes into which large input files should
+ optimally be split to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a file status object that represents the path.
+ @param f The path we want information from
+ @return a FileStatus object
+ @throws FileNotFoundException when the path does not exist;
+ IOException see specific implementation]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permission of a path.
+ @param p
+ @param permission]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param p The path
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.]]>
+ </doc>
+ </method>
+ <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.FileSystem&gt;"/>
+ <doc>
+ <![CDATA[Get the statistics for a particular file system
+ @param cls the class to lookup
+ @return a statistics object]]>
+ </doc>
+ </method>
+ <method name="printStatistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="statistics" type="org.apache.hadoop.fs.FileSystem.Statistics"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The statistics for this file system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An abstract base class for a fairly generic filesystem. It
+ may be implemented as a distributed filesystem, or as a "local"
+ one that reflects the locally-connected disk. The local version
+ exists for small Hadoop instances and for testing.
+
+ <p>
+
+ All user code that may potentially use the Hadoop Distributed
+ File System should be written to use a FileSystem object. The
+ Hadoop DFS is a multi-machine system that appears as a single
+ disk. It's useful because of its fault tolerance and potentially
+ very large capacity.
+
+ <p>
+ The local implementation is {@link LocalFileSystem} and distributed
+ implementation is {@link DistributedFileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem -->
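+ <!-- Illustrative usage, hand-written rather than jdiff-generated: the class
+ comment above says user code should be written against FileSystem, so here is
+ a minimal sketch of writing and reading through that API. The path and the
+ configuration defaults are assumptions, not values taken from this file.
+
+ import java.io.IOException;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FSDataInputStream;
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+
+ public class FileSystemSketch {
+   public static void main(String[] args) throws IOException {
+     Configuration conf = new Configuration();      // reads core-site.xml if present
+     FileSystem fs = FileSystem.get(conf);          // local or distributed, per config
+     Path p = new Path("/tmp/fs-sketch.txt");       // hypothetical path
+     FSDataOutputStream out = fs.create(p, true);   // overwrite if the file exists
+     out.writeUTF("hello");
+     out.close();
+     FSDataInputStream in = fs.open(p);
+     System.out.println(in.readUTF());              // prints "hello"
+     in.close();
+     fs.close();
+   }
+ }
+ -->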
+ <!-- start class org.apache.hadoop.fs.FileSystem.Statistics -->
+ <class name="FileSystem.Statistics" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="incrementBytesRead"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes read in the statistics
+ @param newBytes the additional bytes read]]>
+ </doc>
+ </method>
+ <method name="incrementBytesWritten"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes written in the statistics
+ @param newBytes the additional bytes written]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes read
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes written
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem.Statistics -->
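+ <!-- Illustrative usage, hand-written rather than jdiff-generated: a minimal
+ sketch of reading the per-filesystem counters described above. The lookup key
+ is the concrete FileSystem class, per getStatistics(Class) above.
+
+ import java.io.IOException;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+
+ public class StatisticsSketch {
+   public static void main(String[] args) throws IOException {
+     FileSystem fs = FileSystem.get(new Configuration());
+     FileSystem.Statistics stats = FileSystem.getStatistics(fs.getClass());
+     System.out.println("bytes read: " + stats.getBytesRead());
+     System.out.println("bytes written: " + stats.getBytesWritten());
+     FileSystem.printStatistics();   // dump counters for all tracked filesystems
+   }
+ }
+ -->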
+ <!-- start class org.apache.hadoop.fs.FileUtil -->
+ <class name="FileUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus to an array of Path
+
+ @param stats
+ an array of FileStatus objects
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus to an array of Path.
+ If stats is null, return path.
+ @param stats
+ an array of FileStatus objects
+ @param path
+ default path to return if stats is null
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="fullyDelete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a directory and all its contents. If
+ we return false, the directory may be partially deleted.]]>
+ </doc>
+ </method>
+ <method name="fullyDelete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recursively delete a directory.
+
+ @param fs {@link FileSystem} on which the path is present
+ @param dir directory to recursively delete
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copyMerge" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dstFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="addString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy all files in a directory to one output file (merge).]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy local files to a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="java.io.File"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy FileSystem files to local files.]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param filename The filename to convert
+ @return The Unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @return The Unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="getDU" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Takes an input dir and returns the disk usage (du) of that local directory. Very basic
+ implementation.
+
+ @param dir
+ the input dir whose local disk space is measured
+ @return The total disk space of the input local directory]]>
+ </doc>
+ </method>
+ <method name="unZip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="unzipDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a zip File as input, it will unzip the file into the unzip directory
+ passed as the second parameter.
+ @param inFile The zip file as input
+ @param unzipDir The directory into which to unzip the zip file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unTar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="untarDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a tar File as input, it will untar the file into the untar directory
+ passed as the second parameter.
+
+ This utility will untar ".tar" files as well as ".tar.gz" and ".tgz" files.
+
+ @param inFile The tar file as input.
+ @param untarDir The directory into which to untar the tar file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="symLink" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="linkname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a soft link between a src and destination,
+ only on a local disk. HDFS does not support this.
+ @param target the target for symlink
+ @param linkname the symlink
+ @return value returned by the command]]>
+ </doc>
+ </method>
+ <method name="chmod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="perm" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Change the permissions on a filename.
+ @param filename the name of the file to change
+ @param perm the permission string
+ @return the exit code from the command
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="createLocalTempFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="basefile" type="java.io.File"/>
+ <param name="prefix" type="java.lang.String"/>
+ <param name="isDeleteOnExit" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a tmp file for a base file.
+ @param basefile the base file of the tmp
+ @param prefix file name prefix of tmp
+ @param isDeleteOnExit if true, the tmp will be deleted when the VM exits
+ @return a newly created tmp file
+ @exception IOException If a tmp file cannot be created
+ @see java.io.File#createTempFile(String, String, File)
+ @see java.io.File#deleteOnExit()]]>
+ </doc>
+ </method>
+ <method name="replaceFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="target" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move the src file to the name specified by target.
+ @param src the source file
+ @param target the target file
+ @exception IOException If this operation fails]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of file-processing utility methods.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil -->
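+ <!-- Illustrative usage, hand-written rather than jdiff-generated: a minimal
+ sketch of the copy(File, FileSystem, Path, boolean, Configuration) overload
+ above, pushing a local file into the configured filesystem. The source and
+ destination paths are hypothetical.
+
+ import java.io.File;
+ import java.io.IOException;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.FileUtil;
+ import org.apache.hadoop.fs.Path;
+
+ public class FileUtilCopySketch {
+   public static void main(String[] args) throws IOException {
+     Configuration conf = new Configuration();
+     FileSystem fs = FileSystem.get(conf);
+     boolean ok = FileUtil.copy(new File("/tmp/input.txt"),       // hypothetical source
+                                fs, new Path("/tmp/copied.txt"),  // hypothetical target
+                                false, conf);                     // keep the source
+     System.out.println("copy succeeded: " + ok);
+   }
+ }
+ -->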
+ <!-- start class org.apache.hadoop.fs.FileUtil.HardLink -->
+ <class name="FileUtil.HardLink" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil.HardLink"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createHardLink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.io.File"/>
+ <param name="linkName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a hardlink]]>
+ </doc>
+ </method>
+ <method name="getLinkCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Retrieves the number of links to the specified file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Class for creating hardlinks.
+ Supports Unix, Cygwin, and Windows XP.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil.HardLink -->
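+ <!-- Illustrative usage, hand-written rather than jdiff-generated: a minimal
+ sketch of the HardLink helpers above. Both files are hypothetical, and the
+ target must already exist.
+
+ import java.io.File;
+ import java.io.IOException;
+ import org.apache.hadoop.fs.FileUtil;
+
+ public class HardLinkSketch {
+   public static void main(String[] args) throws IOException {
+     File target = new File("/tmp/original.txt");   // hypothetical existing file
+     File link = new File("/tmp/link.txt");
+     FileUtil.HardLink.createHardLink(target, link);
+     System.out.println("link count: " + FileUtil.HardLink.getLinkCount(target));
+   }
+ }
+ -->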
+ <!-- start class org.apache.hadoop.fs.FilterFileSystem -->
+ <class name="FilterFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FilterFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing hostnames, offset and size of
+ portions of the given file. For a nonexistent
+ file or regions, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The FileSystem will simply return an element containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List files in a directory.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param newDir]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes into which large input files should optimally
+ be split to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get file status.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A <code>FilterFileSystem</code> contains
+ some other file system, which it uses as
+ its basic file system, possibly transforming
+ the data along the way or providing additional
+ functionality. The class <code>FilterFileSystem</code>
+ itself simply overrides all methods of
+ <code>FileSystem</code> with versions that
+ pass all requests to the contained file
+ system. Subclasses of <code>FilterFileSystem</code>
+ may further override some of these methods
+ and may also provide additional methods
+ and fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FilterFileSystem -->
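+ <!-- Illustrative usage, hand-written rather than jdiff-generated: the class
+ comment above describes a pass-through decorator; a hypothetical subclass
+ that overrides a single method and delegates everything else might look
+ like this minimal sketch.
+
+ import java.io.IOException;
+ import org.apache.hadoop.fs.FSDataInputStream;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.FilterFileSystem;
+ import org.apache.hadoop.fs.Path;
+
+ public class LoggingFileSystem extends FilterFileSystem {
+   public LoggingFileSystem(FileSystem fs) {
+     super(fs);                            // the contained "basic" file system
+   }
+   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+     System.err.println("opening " + f);   // added behavior
+     return super.open(f, bufferSize);     // every other call passes straight through
+   }
+ }
+ -->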
+ <!-- start class org.apache.hadoop.fs.FSDataInputStream -->
+ <class name="FSDataInputStream" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSDataInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="desired" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
+ and buffers input through a {@link BufferedInputStream}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataInputStream -->
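+ <!-- Illustrative usage, hand-written rather than jdiff-generated: a minimal
+ sketch of the Seekable and PositionedReadable methods above. seek() moves
+ the stream position; the positional readFully() reads at an absolute offset.
+ The file path is a hypothetical file with at least a few bytes in it.
+
+ import java.io.IOException;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FSDataInputStream;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+
+ public class SeekSketch {
+   public static void main(String[] args) throws IOException {
+     FileSystem fs = FileSystem.get(new Configuration());
+     FSDataInputStream in = fs.open(new Path("/tmp/fs-sketch.txt"));
+     in.seek(2);                                    // absolute offset in bytes
+     System.out.println("pos = " + in.getPos());    // prints 2
+     byte[] buf = new byte[4];
+     in.readFully(0, buf);                          // positional read from offset 0
+     in.close();
+   }
+ }
+ -->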
+ <!-- start class org.apache.hadoop.fs.FSDataOutputStream -->
+ <class name="FSDataOutputStream" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Syncable"/>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWrappedStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps a {@link OutputStream} in a {@link DataOutputStream},
+ buffers output through a {@link BufferedOutputStream} and creates a checksum
+ file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataOutputStream -->
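+ <!-- Illustrative usage, hand-written rather than jdiff-generated: a minimal
+ sketch of the Syncable contract above. sync() asks the filesystem to flush
+ buffered bytes; getPos() reports bytes written so far. The path is
+ hypothetical.
+
+ import java.io.IOException;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+
+ public class SyncSketch {
+   public static void main(String[] args) throws IOException {
+     FileSystem fs = FileSystem.get(new Configuration());
+     FSDataOutputStream out = fs.create(new Path("/tmp/sync-sketch.txt"));
+     out.writeBytes("partial record");
+     out.sync();                                        // flush what is buffered
+     System.out.println("written: " + out.getPos());    // bytes written so far
+     out.close();
+   }
+ }
+ -->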
+ <!-- start class org.apache.hadoop.fs.FSError -->
+ <class name="FSError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Thrown for unexpected filesystem errors, presumed to reflect disk errors
+ in the native filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSError -->
+ <!-- start class org.apache.hadoop.fs.FSInputChecker -->
+ <class name="FSInputChecker" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs]]>
+ </doc>
+ </constructor>
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int, boolean, java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs
+ @param sum the type of Checksum engine
+ @param chunkSize maximum chunk size
+ @param checksumSize the number of bytes in each checksum]]>
+ </doc>
+ </constructor>
+ <method name="readChunk" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads in next checksum chunk data into <code>buf</code> at <code>offset</code>
+ and checksum into <code>checksum</code>.
+ The method is used for implementing read; therefore, it should be optimized
+ for sequential reading.
+ @param pos chunkPos
+ @param buf destination buffer
+ @param offset offset in buf at which to store data
+ @param len maximum number of bytes to read
+ @return number of bytes read]]>
+ </doc>
+ </method>
+ <method name="getChunkPosition" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <doc>
+ <![CDATA[Return position of beginning of chunk containing pos.
+
+ @param pos a position in the file
+ @return the starting position of the chunk which contains the byte]]>
+ </doc>
+ </method>
+ <method name="needChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if there is a need for checksum verification]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read one checksum-verified byte
+
+ @return the next byte of data, or <code>-1</code> if the end of the
+ stream is reached.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read checksum verified bytes from this byte-input stream into
+ the specified byte array, starting at the given offset.
+
+ <p> This method implements the general contract of the corresponding
+ <code>{@link InputStream#read(byte[], int, int) read}</code> method of
+ the <code>{@link InputStream}</code> class. As an additional
+ convenience, it attempts to read as many bytes as possible by repeatedly
+ invoking the <code>read</code> method of the underlying stream. This
+ iterated <code>read</code> continues until one of the following
+ conditions becomes true: <ul>
+
+ <li> The specified number of bytes have been read,
+
+ <li> The <code>read</code> method of the underlying stream returns
+ <code>-1</code>, indicating end-of-file.
+
+ </ul> If the first <code>read</code> on the underlying stream returns
+ <code>-1</code> to indicate end-of-file then this method returns
+ <code>-1</code>. Otherwise this method returns the number of bytes
+ actually read.
+
+ @param b destination buffer.
+ @param off offset at which to start storing bytes.
+ @param len maximum number of bytes to read.
+ @return the number of bytes read, or <code>-1</code> if the end of
+ the stream has been reached.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if any checksum error occurs]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over and discards <code>n</code> bytes of data from the
+ input stream.
+
+ <p>This method may skip more bytes than are remaining in the backing
+ file. This produces no exception and the number of bytes skipped
+ may include some number of bytes that were beyond the EOF of the
+ backing file. Attempting to read from the stream after skipping past
+ the end will result in -1 indicating the end of the file.
+
+<p>If <code>n</code> is negative, no bytes are skipped.
+
+ @param n the number of bytes to be skipped.
+ @return the actual number of bytes skipped.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to skip to is corrupted]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given position in the stream.
+ The next read() will be from that position.
+
+ <p>This method may seek past the end of the file.
+ This produces no exception and an attempt to read from
+ the stream will result in -1 indicating the end of the file.
+
+ @param pos the position to seek to.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to seek to is corrupted]]>
+ </doc>
+ </method>
+ <method name="readFully" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="stm" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A utility function that tries to read up to <code>len</code> bytes from
+ <code>stm</code>
+
+ @param stm an input stream
+ @param buf destination buffer
+ @param offset offset at which to store data
+ @param len number of bytes to read
+ @return actual number of bytes read
+ @throws IOException if there is any IO error]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="maxChunkSize" type="int"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Set the checksum related parameters
+ @param sum which type of checksum to use
+ @param maxChunkSize maximum chunk size
+ @param checksumSize checksum size]]>
+ </doc>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="readlimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="file" type="org.apache.hadoop.fs.Path"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file name from which data is read]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This is a generic input stream for verifying checksums for
+ data before it is read by a user.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputChecker -->
+ <!-- start class org.apache.hadoop.fs.FSInputStream -->
+ <class name="FSInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSInputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="seek"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks a different copy of the data. Returns true if
+ found a new source, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[FSInputStream is a generic InputStream with a little bit
+ of RandomAccessFile-style seek ability.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputStream -->
+ <!-- start class org.apache.hadoop.fs.FSOutputSummer -->
+ <class name="FSOutputSummer" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSOutputSummer" type="java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="writeChunk"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write one byte]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes <code>len</code> bytes from the specified byte array
+ starting at offset <code>off</code> and generates a checksum for
+ each data chunk.
+
+ <p> This method stores bytes from the given array into this
+ stream's buffer before it gets checksummed. The buffer gets checksummed
+ and flushed to the underlying output stream when all data
+ in a checksum chunk are in the buffer. If the buffer is empty and the
+ requested length is at least as large as the size of the next checksum
+ chunk, this method will checksum and write the chunk directly
+ to the underlying output stream. Thus it avoids unnecessary data copying.
+
+ @param b the data.
+ @param off the start offset in the data.
+ @param len the number of bytes to write.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This is a generic output stream for generating checksums for
+ data before it is written to the underlying stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSOutputSummer -->
+ <!-- start class org.apache.hadoop.fs.FsShell -->
+ <class name="FsShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="FsShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the Trash object associated with this shell.]]>
+ </doc>
+ </method>
+ <method name="byteDesc" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="long"/>
+ <doc>
+ <![CDATA[Return an abbreviated English-language description of the byte length]]>
+ </doc>
+ </method>
+ <method name="limitDecimalTo2" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dateForm" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="modifFmt" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provide command line access to a FileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsShell -->
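+ <!-- Illustrative usage, hand-written rather than jdiff-generated: FsShell
+ implements Tool, so it can be driven programmatically through ToolRunner
+ with the same arguments as the command line. The listed path is an
+ assumption.
+
+ import org.apache.hadoop.fs.FsShell;
+ import org.apache.hadoop.util.ToolRunner;
+
+ public class FsShellSketch {
+   public static void main(String[] args) throws Exception {
+     // Equivalent to: hadoop fs -ls /
+     int rc = ToolRunner.run(new FsShell(), new String[] { "-ls", "/" });
+     System.exit(rc);
+   }
+ }
+ -->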
+ <!-- start class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
+ <class name="FsUrlStreamHandlerFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.net.URLStreamHandlerFactory"/>
+ <constructor name="FsUrlStreamHandlerFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsUrlStreamHandlerFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createURLStreamHandler" return="java.net.URLStreamHandler"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Factory for URL stream handlers.
+
+ There is only one handler whose job is to create UrlConnections. A
+ FsUrlConnection relies on FileSystem to choose the appropriate FS
+ implementation.
+
+ Before returning our handler, we make sure that FileSystem knows an
+ implementation for the requested scheme/protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
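+ <!-- A usage sketch (illustrative only; the namenode host and file are
+      placeholders): installing the factory so plain java.net.URL can open
+      Hadoop filesystem schemes. The JVM accepts a single
+      URLStreamHandlerFactory, so this can be done at most once per process.
+
+      import java.io.InputStream;
+      import java.net.URL;
+      import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
+
+      URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
+      InputStream in = new URL("hdfs://namenode:8020/user/data.txt").openStream();
+      in.close();
+ -->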
+ <!-- start class org.apache.hadoop.fs.HarFileSystem -->
+ <class name="HarFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HarFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Public default constructor for HarFileSystem.]]>
+ </doc>
+ </constructor>
+ <constructor name="HarFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor to create a HarFileSystem with an
+ underlying filesystem.
+ @param fs the underlying filesystem]]>
+ </doc>
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize a Har filesystem per har archive. The
+ archive home directory is the top level directory
+ in the filesystem that contains the HAR archive.
+ Be careful with this method: you do not want to go
+ on creating new Filesystem instances per call to
+ path.getFileSystem().
+ The URI of a Har filesystem is
+ har://underlyingfsscheme-host:port/archivepath
+ or
+ har:///archivepath, which assumes the default
+ underlying filesystem when none is specified.]]>
+ </doc>
+ </method>
+ <method name="getHarVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the top level archive.]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the URI of this filesystem.
+ The URI is of the form
+ har://underlyingfsscheme-host:port/pathintheunderlyingfs]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get block locations from the underlying fs.
+ @param file the input filestatus to get block locations for
+ @param start the start offset in the file
+ @param len the length of the segment in the file
+ @return block locations for this segment of the file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getHarHash" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Get the hash of the path p inside
+ the filesystem.
+ @param p the path in the har filesystem
+ @return the hash code of the path.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the filestatus of files in the har archive.
+ The permissions returned are those of the archive
+ index files; permissions are not persisted
+ when a hadoop archive is created.
+ @param f the path in the har filesystem
+ @return the filestatus.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a har input stream which fakes end of
+ file. It reads the index files to get the part
+ file name and the size and start of the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[listStatus returns the children of a directory
+ after looking up the index files.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the top level archive path.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies the file in the har filesystem to a local file.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permisssion" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <field name="VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is an implementation of the Hadoop Archive
+ Filesystem. This archive Filesystem has index files
+ of the form _index* and has contents of the form
+ part-*. The index files store the indexes of the
+ real files. The index files are of the form _masterindex
+ and _index. The master index is a level of indirection
+ into the index file to make lookups faster. The index
+ file is sorted by the hash codes of the paths it contains,
+ and the master index contains pointers to the positions in
+ the index for ranges of hash codes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.HarFileSystem -->
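+ <!-- A sketch of the har:// URI form documented in initialize() above
+      (the archive location and file name are placeholders).
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      Configuration conf = new Configuration();
+      // har://hdfs-namenode:8020/user/archives/foo.har names an archive stored
+      // on the underlying hdfs filesystem
+      Path p = new Path("har://hdfs-namenode:8020/user/archives/foo.har/file.txt");
+      FileSystem harFs = p.getFileSystem(conf); // reuse this instance, per the note above
+      harFs.open(p, 4096).close();              // read-only access into the archive
+ -->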
+ <!-- start class org.apache.hadoop.fs.InMemoryFileSystem -->
+ <class name="InMemoryFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InMemoryFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InMemoryFileSystem" type="java.net.URI, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reserveSpaceWithCheckSum" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Register a file with its size. This will also register a checksum for the
+ file that the user is trying to create. This is required since none of
+ the FileSystem APIs accept the size of the file as an argument, yet the
+ size of the file to be created must be known a priori. The user
+ must therefore call this method for each file to be created,
+ reserving memory for that file. We either succeed in reserving memory
+ for both the main file and the checksum file and return true, or return
+ false.]]>
+ </doc>
+ </method>
+ <method name="getFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getNumFiles" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getFSSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPercentUsed" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of the in-memory filesystem. This implementation assumes
+ that the file lengths are known ahead of time and that the total length of all
+ the files is below a certain limit (like 100 MB, configurable). Use the API
+ reserveSpaceWithCheckSum(Path f, int size), described above, to reserve
+ space in the FS. The URI of this filesystem starts with
+ ramfs://.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.InMemoryFileSystem -->
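+ <!-- A sketch of the reservation protocol described above (assumptions: the
+      configuration maps the ramfs scheme to this class, and the URI is a
+      placeholder): reserve space for a file of known size before creating it.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.InMemoryFileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      Configuration conf = new Configuration();
+      Path f = new Path("ramfs://mapoutput/file1");
+      InMemoryFileSystem ramFs = (InMemoryFileSystem) f.getFileSystem(conf);
+      if (ramFs.reserveSpaceWithCheckSum(f, 1024L)) {
+        // memory is reserved for both the file and its checksum,
+        // so it is now safe to create the file
+        ramFs.create(f).close();
+      }
+ -->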
+ <!-- start class org.apache.hadoop.fs.LocalDirAllocator -->
+ <class name="LocalDirAllocator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalDirAllocator" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an allocator object.
+ @param contextCfgItemName the configuration item (such as "mapred.local.dir")
+ naming the set of local directories to allocate among]]>
+ </doc>
+ </constructor>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. This method should be used if the size of
+ the file is not known a priori. We go round-robin over the set of disks
+ (via the configured dirs) and return the first complete path where
+ we could create the parent directory of the passed path.
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. Pass size as -1 if not known a priori. We
+ round-robin over the set of disks (via the configured dirs) and return
+ the first complete path which has enough space
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathToRead" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS for reading. We search through all the
+ configured dirs for the file's existence and return the complete
+ path to the file when we find one
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createTmpFileForWrite" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a temporary file in the local FS. Pass size as -1 if not known
+ a priori. We round-robin over the set of disks (via the configured dirs)
+ and select the first complete path which has enough space. A file is
+ created on this directory. The file is guaranteed to go away when the
+ JVM exits.
+ @param pathStr prefix for the temporary file
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return a unique temporary file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isContextValid" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextCfgItemName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Method to check whether a context is valid.
+ @param contextCfgItemName the configuration item naming the context
+ @return true if the context is valid, false otherwise]]>
+ </doc>
+ </method>
+ <method name="ifExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[We search through all the configured dirs for the file's existence
+ and return true when we find it.
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return true if the file exists, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of a round-robin scheme for disk allocation for creating
+ files. It keeps track of which disk was last
+ allocated for a file write. For the current request, the next disk from
+ the set of disks is allocated if the free space on the disk is
+ sufficient to accommodate the file that is being considered for
+ creation. If the space requirement cannot be met, the next disk in order
+ is tried, and so on, until a disk with sufficient capacity is found.
+ Once a disk with sufficient space is identified, a check is done to make
+ sure that the disk is writable. Also, there is an API provided that doesn't
+ take the space requirements into consideration but just checks whether the
+ disk under consideration is writable (this should be used for cases where
+ the file size is not known a priori). An API is provided to read a path that
+ was created earlier. That API works by doing a scan of all the disks for the
+ input pathname.
+ This implementation also provides the functionality of having multiple
+ allocators per JVM (one for each unique functionality or context, like
+ mapred, dfs-client, etc.). It ensures that there is only one instance of
+ an allocator per context per JVM.
+ Note:
+ 1. The contexts referred to above are actually the configuration items defined
+ in the Configuration class like "mapred.local.dir" (for which we want to
+ control the dir allocations). The context-strings are exactly those
+ configuration items.
+ 2. This implementation does not take into consideration cases where
+ a disk becomes read-only or goes out of space while a file is being written
+ to (disks are shared between multiple processes, and so the latter situation
+ is probable).
+ 3. In the class implementation, "Disk" is referred to as "Dir", which
+ actually points to the configured directory on the Disk which will be the
+ parent for all file write/read allocations.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalDirAllocator -->
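+ <!-- A short sketch (illustrative; "mapred.local.dir" is one of the context
+      configuration items named in note 1 above, and the file name is a
+      placeholder) of allocating a local path for writing and later locating
+      the same file for reading.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.LocalDirAllocator;
+      import org.apache.hadoop.fs.Path;
+
+      Configuration conf = new Configuration();
+      LocalDirAllocator alloc = new LocalDirAllocator("mapred.local.dir");
+      Path out = alloc.getLocalPathForWrite("spill0.out", 1 << 20, conf); // size known: 1 MB
+      // ... write the file at 'out', then find it again later:
+      Path in = alloc.getLocalPathToRead("spill0.out", conf);
+ -->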
+ <!-- start class org.apache.hadoop.fs.LocalFileSystem -->
+ <class name="LocalFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocalFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Moves files to a bad file directory on the same device, so that their
+ storage will not be reused.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the checksumed local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalFileSystem -->
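+ <!-- A sketch (illustrative only; the path is a placeholder) of obtaining this
+      checksummed local filesystem via FileSystem.getLocal() and converting a
+      Path to a java.io.File.
+
+      import java.io.File;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.LocalFileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
+      File f = localFs.pathToFile(new Path("/tmp/example.txt"));
+ -->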
+ <!-- start class org.apache.hadoop.fs.Path -->
+ <class name="Path" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="Path" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a path from a String. Path strings are URIs, but with
+ unescaped elements and some additional normalization.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Path from components.]]>
+ </doc>
+ </constructor>
+ <method name="toUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this to a URI.]]>
+ </doc>
+ </method>
+ <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the FileSystem that owns this Path.]]>
+ </doc>
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True if the directory of this path is absolute.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the final component of this path.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the parent of a path or null if at root.]]>
+ </doc>
+ </method>
+ <method name="suffix" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a suffix to the final name in the path.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="depth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of elements in this path.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <doc>
+ <![CDATA[Returns a qualified path object.]]>
+ </doc>
+ </method>
+ <field name="SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The directory separator, a slash.]]>
+ </doc>
+ </field>
+ <field name="SEPARATOR_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CUR_DIR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Names a file or directory in a {@link FileSystem}.
+ Path strings use slash as the directory separator. A path string is
+ absolute if it begins with a slash.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Path -->
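+ <!-- A sketch of the parent/child resolution and qualification described above;
+      the concrete paths are placeholders.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      Path parent = new Path("/user/alice");
+      Path child = new Path(parent, "logs/app.log"); // resolves to /user/alice/logs/app.log
+      String name = child.getName();                 // "app.log"
+      FileSystem fs = FileSystem.get(new Configuration());
+      Path qualified = child.makeQualified(fs);      // adds the filesystem's scheme and authority
+ -->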
+ <!-- start interface org.apache.hadoop.fs.PathFilter -->
+ <interface name="PathFilter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Tests whether or not the specified abstract pathname should be
+ included in a pathname list.
+
+ @param path The abstract pathname to be tested
+ @return <code>true</code> if and only if <code>path</code>
+ should be included]]>
+ </doc>
+ </method>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PathFilter -->
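+ <!-- A minimal sketch of the accept() contract above: a filter that keeps only
+      paths ending in ".log" (the suffix is an arbitrary example).
+
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.fs.PathFilter;
+
+      PathFilter logFilter = new PathFilter() {
+        public boolean accept(Path path) {
+          return path.getName().endsWith(".log"); // include only *.log entries
+        }
+      };
+      // e.g. pass logFilter to a FileSystem listing call that accepts a PathFilter
+ -->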
+ <!-- start interface org.apache.hadoop.fs.PositionedReadable -->
+ <interface name="PositionedReadable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read up to the specified number of bytes, from a given
+ position within a file, and return the number of bytes read. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the specified number of bytes, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a number of bytes equal to the length of the buffer, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits positional reading.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PositionedReadable -->
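+ <!-- A sketch of a thread-safe positioned read as specified above (setup elided;
+      fs and path stand for an initialized FileSystem and Path). FSDataInputStream,
+      returned by FileSystem.open(), implements this interface: readFully() fills
+      the buffer from a fixed offset without moving the current position.
+
+      org.apache.hadoop.fs.FSDataInputStream in = fs.open(path);
+      byte[] buf = new byte[4096];
+      in.readFully(1024L, buf); // read buf.length bytes starting at offset 1024
+      long pos = in.getPos();   // still 0: positioned reads do not seek
+      in.close();
+ -->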
+ <!-- start class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <class name="RawLocalFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RawLocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the specified directory hierarchy. Does not
+ treat existence as an error.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsWorkingFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chown to set owner.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chmod to set permission.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the raw local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <!-- start interface org.apache.hadoop.fs.Seekable -->
+ <interface name="Seekable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file.]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks to a different copy of the data. Returns true if
+ a new source is found, false otherwise.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits seeking.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Seekable -->
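+ <!-- A companion sketch to the positioned-read example above (same elided
+      setup): explicit seeking moves the current offset, unlike
+      PositionedReadable reads.
+
+      org.apache.hadoop.fs.FSDataInputStream in = fs.open(path);
+      in.seek(128L);                            // the next read() starts at offset 128
+      long pos = in.getPos();                   // 128
+      boolean found = in.seekToNewSource(128L); // try a different copy of the data
+      in.close();
+ -->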
+ <!-- start class org.apache.hadoop.fs.ShellCommand -->
+ <class name="ShellCommand" extends="org.apache.hadoop.util.Shell"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link Shell} instead.">
+ <constructor name="ShellCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A base class for running a unix command like du or df.
+ @deprecated Use {@link Shell} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ShellCommand -->
+ <!-- start interface org.apache.hadoop.fs.Syncable -->
+ <interface name="Syncable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Synchronize all buffers with the underlying devices.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface declares the sync() operation.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Syncable -->
+ <!-- start class org.apache.hadoop.fs.Trash -->
+ <class name="Trash" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Trash" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a trash can accessor.
+ @param conf a Configuration]]>
+ </doc>
+ </constructor>
+ <method name="moveToTrash" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move a file or directory to the current trash directory.
+ @return false if the item is already in the trash or trash is disabled]]>
+ </doc>
+ </method>
+ <method name="checkpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a trash checkpoint.]]>
+ </doc>
+ </method>
+ <method name="expunge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete old checkpoints.]]>
+ </doc>
+ </method>
+ <method name="getEmptier" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a {@link Runnable} that periodically empties the trash of all
+ users, intended to be run by the superuser. Only one checkpoint is kept
+ at a time.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Run an emptier.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides a <i>trash</i> feature. Files are moved to a user's trash
+ directory, a subdirectory of their home directory named ".Trash". Files are
+ initially moved to a <i>current</i> sub-directory of the trash directory.
+ Within that sub-directory their original path is preserved. Periodically
+ one may checkpoint the current trash and remove older checkpoints. (This
+ design permits trash management without enumeration of the full trash
+ content, without date support in the filesystem, and without clock
+ synchronization.)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Trash -->
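+ <!-- A usage sketch of the trash lifecycle described above (assumptions: trash
+      is enabled in the configuration, and the path is a placeholder).
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.fs.Trash;
+
+      Trash trash = new Trash(new Configuration());
+      if (trash.moveToTrash(new Path("/user/alice/old-data"))) {
+        trash.checkpoint(); // snapshot the current trash directory
+        trash.expunge();    // delete checkpoints older than the retention period
+      }
+ -->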
+</package>
+<package name="org.apache.hadoop.fs.ftp">
+ <!-- start class org.apache.hadoop.fs.ftp.FTPException -->
+ <class name="FTPException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.String, java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A class to wrap a {@link Throwable} into a RuntimeException.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPException -->
+ <!-- start class org.apache.hadoop.fs.ftp.FTPFileSystem -->
+ <class name="FTPFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A stream obtained via this call must be closed before using other APIs of
+ this class or else the invocation will block.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} backed by an FTP client provided by <a
+ href="http://commons.apache.org/net/">Apache Commons Net</a>.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPFileSystem -->
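+ <!-- Usage sketch: the FTPFileSystem above is normally obtained through the
+      generic FileSystem factory rather than constructed directly. A minimal,
+      hedged Java example; the host, user and password in the URI are
+      placeholders, and it assumes the stock core-default.xml maps the "ftp"
+      scheme to this class:
+
+        import java.net.URI;
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.FileStatus;
+        import org.apache.hadoop.fs.FileSystem;
+        import org.apache.hadoop.fs.Path;
+
+        public class FtpList {
+          public static void main(String[] args) throws Exception {
+            Configuration conf = new Configuration();
+            FileSystem fs = FileSystem.get(
+                URI.create("ftp://user:password@ftp.example.com/"), conf);
+            for (FileStatus st : fs.listStatus(new Path("/"))) {
+              System.out.println(st.getPath());  // one line per remote entry
+            }
+          }
+        }
+ -->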
+ <!-- start class org.apache.hadoop.fs.ftp.FTPInputStream -->
+ <class name="FTPInputStream" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPInputStream" type="java.io.InputStream, org.apache.commons.net.ftp.FTPClient, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readLimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPInputStream -->
+</package>
+<package name="org.apache.hadoop.fs.kfs">
+ <!-- start class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+ <class name="KosmosFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="KosmosFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return null if the file doesn't exist; otherwise, get the
+ locations of the various chunks of the file from KFS.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A FileSystem backed by KFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
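+ <!-- Usage sketch: KosmosFileSystem is likewise reached through the generic
+      FileSystem factory. A hedged Java fragment (imports as in the FTP sketch
+      above); the "kfs" scheme mapping and the metaserver host/port are
+      assumptions about a typical KFS deployment:
+
+        Configuration conf = new Configuration();
+        FileSystem kfs = FileSystem.get(
+            URI.create("kfs://metaserver.example.com:20000/"), conf);
+        kfs.mkdirs(new Path("/data"),
+            org.apache.hadoop.fs.permission.FsPermission.getDefault());
+        System.out.println(kfs.getFileStatus(new Path("/data")).isDir());
+ -->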
+</package>
+<package name="org.apache.hadoop.fs.permission">
+ <!-- start class org.apache.hadoop.fs.permission.AccessControlException -->
+ <class name="AccessControlException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AccessControlException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor is needed for unwrapping from
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[An exception class for access control related issues.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.AccessControlException -->
+ <!-- start class org.apache.hadoop.fs.permission.FsAction -->
+ <class name="FsAction" extends="java.lang.Enum&lt;org.apache.hadoop.fs.permission.FsAction&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.fs.permission.FsAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="implies" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[Return true if this action implies that action.
+ @param that the action to test against]]>
+ </doc>
+ </method>
+ <method name="and" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[AND operation.]]>
+ </doc>
+ </method>
+ <method name="or" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[OR operation.]]>
+ </doc>
+ </method>
+ <method name="not" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[NOT operation.]]>
+ </doc>
+ </method>
+ <field name="INDEX" type="int"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Octal representation]]>
+ </doc>
+ </field>
+ <field name="SYMBOL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Symbolic representation]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[File system actions, e.g. read, write, etc.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsAction -->
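+ <!-- Usage sketch: FsAction behaves as a small bitmask-like enum. A hedged
+      Java fragment, assuming the conventional READ/WRITE/EXECUTE constants:
+
+        import org.apache.hadoop.fs.permission.FsAction;
+
+        FsAction rw = FsAction.READ.or(FsAction.WRITE);   // READ_WRITE
+        System.out.println(rw.implies(FsAction.READ));    // true
+        System.out.println(rw.and(FsAction.EXECUTE));     // NONE
+        System.out.println(rw.not());                     // EXECUTE
+        System.out.println(rw.SYMBOL);                    // symbolic form "rw-"
+ -->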
+ <!-- start class org.apache.hadoop.fs.permission.FsPermission -->
+ <class name="FsPermission" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct by the given {@link FsAction}.
+ @param u user action
+ @param g group action
+ @param o other action]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="short"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct by the given mode.
+ @param mode
+ @see #toShort()]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor
+
+ @param other other permission]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="permission" type="short"/>
+ <doc>
+ <![CDATA[Create an immutable {@link FsPermission} object.]]>
+ </doc>
+ </method>
+ <method name="getUserAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getGroupAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getOtherAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return other {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="fromShort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="short"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link FsPermission} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="toShort" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Encode the object to a short.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply a umask to this permission and return a new one]]>
+ </doc>
+ </method>
+ <method name="getUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="setUMask"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Set the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="getDefault" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default permission.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unixSymbolicPermission" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create a FsPermission from a Unix symbolic permission string
+ @param unixSymbolicPermission e.g. "-rw-rw-rw-"]]>
+ </doc>
+ </method>
+ <field name="UMASK_LABEL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[umask property label]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_UMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A class for file/directory permissions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsPermission -->
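+ <!-- Usage sketch for FsPermission: construct from an octal short, apply a
+      umask, and round-trip through the Unix symbolic form. A hedged Java
+      fragment; output comments reflect the documented toShort()/toString()
+      behaviour:
+
+        import org.apache.hadoop.fs.permission.FsPermission;
+
+        FsPermission open = new FsPermission((short) 0777);   // rwxrwxrwx
+        FsPermission umask = new FsPermission((short) 022);
+        System.out.println(open.applyUMask(umask));           // rwxr-xr-x
+        FsPermission p = FsPermission.valueOf("-rwxr-xr-x");
+        System.out.println(p.toShort() == 0755);              // true
+ -->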
+ <!-- start class org.apache.hadoop.fs.permission.PermissionStatus -->
+ <class name="PermissionStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="PermissionStatus" type="java.lang.String, java.lang.String, org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <param name="group" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Create an immutable {@link PermissionStatus} object.]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user name]]>
+ </doc>
+ </method>
+ <method name="getGroupName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group name]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return permission]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply umask.
+ @see FsPermission#applyUMask(FsPermission)]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link PermissionStatus} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a {@link PermissionStatus} from its base components.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store permission related information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.PermissionStatus -->
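+ <!-- Usage sketch: PermissionStatus simply bundles owner, group and mode.
+      A hedged Java fragment; "alice" and "users" are placeholder names:
+
+        import org.apache.hadoop.fs.permission.FsPermission;
+        import org.apache.hadoop.fs.permission.PermissionStatus;
+
+        PermissionStatus ps = PermissionStatus.createImmutable(
+            "alice", "users", new FsPermission((short) 0755));
+        System.out.println(ps.getUserName() + ":" + ps.getGroupName()
+            + " " + ps.getPermission());          // alice:users rwxr-xr-x
+ -->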
+</package>
+<package name="org.apache.hadoop.fs.s3">
+ <!-- start class org.apache.hadoop.fs.s3.Block -->
+ <class name="Block" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Block" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Holds metadata about a block of data being stored in a {@link FileSystemStore}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.Block -->
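+ <!-- Usage sketch: Block is a plain value object. A hedged Java fragment;
+      the id and length are arbitrary placeholders:
+
+        import org.apache.hadoop.fs.s3.Block;
+
+        Block b = new Block(42L, 64L * 1024 * 1024);   // id, length in bytes
+        System.out.println(b.getId() + " / " + b.getLength());
+ -->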
+ <!-- start interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <interface name="FileSystemStore" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inode" type="org.apache.hadoop.fs.s3.INode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="inodeExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveINode" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveBlock" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="byteRangeStart" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listDeepSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="purge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete everything. Used for testing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="dump"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Diagnostic method to dump all INodes to the console.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for storing and retrieving {@link INode}s and {@link Block}s.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <!-- start class org.apache.hadoop.fs.s3.INode -->
+ <class name="INode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="INode" type="org.apache.hadoop.fs.s3.INode.FileType, org.apache.hadoop.fs.s3.Block[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.fs.s3.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileType" return="org.apache.hadoop.fs.s3.INode.FileType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSerializedLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="serialize" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deserialize" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="FILE_TYPES" type="org.apache.hadoop.fs.s3.INode.FileType[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DIRECTORY_INODE" type="org.apache.hadoop.fs.s3.INode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Holds file metadata including type (regular file or directory)
+ and the list of blocks that are pointers to the data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.INode -->
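+ <!-- Usage sketch: an INode serializes to an InputStream and can be read back
+      with the static deserialize method. A hedged Java fragment that
+      round-trips the canonical directory inode exposed as DIRECTORY_INODE:
+
+        import java.io.InputStream;
+        import org.apache.hadoop.fs.s3.INode;
+
+        InputStream in = INode.DIRECTORY_INODE.serialize();
+        INode copy = INode.deserialize(in);
+        System.out.println(copy.isDirectory());   // true
+ -->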
+ <!-- start class org.apache.hadoop.fs.s3.MigrationTool -->
+ <class name="MigrationTool" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="MigrationTool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ This class is a tool for migrating data from an older to a newer version
+ of an S3 filesystem.
+ </p>
+ <p>
+ All files in the filesystem are migrated by re-writing the block metadata;
+ no datafiles are touched.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.MigrationTool -->
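+ <!-- Usage sketch: MigrationTool implements Tool, so it is normally driven
+      through ToolRunner from a main method that throws Exception. A hedged
+      Java fragment; the bucket URI is a placeholder and credentials are
+      assumed to come from the configuration:
+
+        import org.apache.hadoop.fs.s3.MigrationTool;
+        import org.apache.hadoop.util.ToolRunner;
+
+        int rc = ToolRunner.run(new MigrationTool(),
+                                new String[] { "s3://bucket" });
+        System.exit(rc);
+ -->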
+ <!-- start class org.apache.hadoop.fs.s3.S3Credentials -->
+ <class name="S3Credentials" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Credentials"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@throws IllegalArgumentException if credentials for S3 cannot be
+ determined.]]>
+ </doc>
+ </method>
+ <method name="getAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSecretAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Extracts AWS credentials from the filesystem URI or configuration.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Credentials -->
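+ <!-- Usage sketch: S3Credentials resolves the access and secret keys either
+      from the URI or from the configuration. A hedged Java fragment; the
+      property names follow the usual fs.s3.* convention and the key values
+      are placeholders:
+
+        import java.net.URI;
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.s3.S3Credentials;
+
+        Configuration conf = new Configuration();
+        conf.set("fs.s3.awsAccessKeyId", "MYACCESSKEY");
+        conf.set("fs.s3.awsSecretAccessKey", "MYSECRETKEY");
+        S3Credentials c = new S3Credentials();
+        c.initialize(URI.create("s3://bucket"), conf);
+        System.out.println(c.getAccessKey());   // MYACCESSKEY
+ -->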
+ <!-- start class org.apache.hadoop.fs.s3.S3Exception -->
+ <class name="S3Exception" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Exception" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown if there is a problem communicating with Amazon S3.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Exception -->
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystem -->
+ <class name="S3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="S3FileSystem" type="org.apache.hadoop.fs.s3.FileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[FileStatus for S3 file systems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A block-based {@link FileSystem} backed by
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ </p>
+ @see NativeS3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystem -->
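+ <!-- Usage sketch: the block-based S3FileSystem is selected by the "s3"
+      scheme. A hedged Java fragment (imports as in the FTP sketch above);
+      bucket name and paths are placeholders, and credentials are assumed to
+      be configured as in the S3Credentials sketch:
+
+        Configuration conf = new Configuration();
+        FileSystem s3 = FileSystem.get(URI.create("s3://bucket/"), conf);
+        s3.mkdirs(new Path("/dir"),
+            org.apache.hadoop.fs.permission.FsPermission.getDefault());
+        System.out.println(s3.getFileStatus(new Path("/dir")).isDir());
+ -->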
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <class name="S3FileSystemException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystemException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when there is a fatal exception while using {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <!-- start class org.apache.hadoop.fs.s3.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="org.apache.hadoop.fs.s3.S3FileSystemException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when Hadoop cannot read the version of the data stored
+ in {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.VersionMismatchException -->
+</package>
+<package name="org.apache.hadoop.fs.s3native">
+ <!-- start class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
+ <class name="NativeS3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeS3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NativeS3FileSystem" type="org.apache.hadoop.fs.s3native.NativeFileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ If <code>f</code> is a file, this method will make a single call to S3.
+ If <code>f</code> is a directory, this method will make a maximum of
+ (<i>n</i> / 1000) + 2 calls to S3, where <i>n</i> is the total number of
+ files and directories contained directly in <code>f</code>.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} for reading and writing files stored on
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
+ stores files on S3 in their
+ native form so they can be read by other S3 tools.
+ </p>
+ @see org.apache.hadoop.fs.s3.S3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
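+ <!-- Usage sketch: NativeS3FileSystem stores plain objects and is selected by
+      the "s3n" scheme. A hedged Java fragment (imports as in the FTP sketch
+      above); the bucket is a placeholder. Per the listStatus note above,
+      listing a directory with n direct children costs at most
+      (n / 1000) + 2 S3 calls:
+
+        Configuration conf = new Configuration();
+        FileSystem s3n = FileSystem.get(URI.create("s3n://bucket/"), conf);
+        for (FileStatus st : s3n.listStatus(new Path("/"))) {
+          System.out.println(st.getPath() + " " + st.getLen());
+        }
+ -->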
+</package>
+<package name="org.apache.hadoop.fs.shell">
+ <!-- start class org.apache.hadoop.fs.shell.Command -->
+ <class name="Command" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Command" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the command's name, excluding the leading '-' character]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the command on the input path
+
+ @param path the input path
+ @throws IOException if any error occurs]]>
+ </doc>
+ </method>
+ <method name="runAll" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[For each source path, execute the command
+
+ @return 0 if it runs successfully; -1 if it fails]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="args" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract class for the execution of a file system command]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Command -->
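+ <!-- Usage sketch: concrete shell commands subclass Command, supply a name,
+      and implement run(Path); runAll() then applies run to each entry of the
+      protected args array. A hedged Java sketch of a hypothetical subclass:
+
+        import java.io.IOException;
+        import org.apache.hadoop.fs.FileSystem;
+        import org.apache.hadoop.fs.Path;
+        import org.apache.hadoop.fs.shell.Command;
+
+        class ExistsCommand extends Command {
+          ExistsCommand(FileSystem fs, String[] paths) {
+            super(fs);
+            this.args = paths;   // protected field inherited from Command
+          }
+          public String getCommandName() { return "exists"; }
+          protected void run(Path path) throws IOException {
+            System.out.println(path + ": " + fs.exists(path));
+          }
+        }
+        // int rc = new ExistsCommand(fs, new String[] { "/tmp" }).runAll();
+ -->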
+ <!-- start class org.apache.hadoop.fs.shell.CommandFormat -->
+ <class name="CommandFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CommandFormat" type="java.lang.String, int, int, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="parse" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="pos" type="int"/>
+ <doc>
+ <![CDATA[Parse parameters starting from the given position
+
+ @param args an array of input arguments
+      @param pos the position at which to start parsing
+ @return a list of parameters]]>
+ </doc>
+ </method>
+ <method name="getOpt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="option" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Return whether the option is set
+
+      @param option String representation of an option
+      @return true if the option is set; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Parse the arguments of a command and check their format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.CommandFormat -->
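+  <!-- An illustrative sketch of CommandFormat (the argument values are
+       invented). The constructor takes the command name, the minimum and
+       maximum number of parameters, and the accepted option letters.
+
+  String[] args = {"-count", "-q", "/user/alice"};
+  CommandFormat fmt =
+      new CommandFormat("count", 1, Integer.MAX_VALUE, new String[] {"q"});
+  java.util.List<String> params = fmt.parse(args, 1);  // skip the command name
+  boolean quota = fmt.getOpt("q");                     // true: "-q" was given
+  // params now holds ["/user/alice"]
+  -->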
+ <!-- start class org.apache.hadoop.fs.shell.Count -->
+ <class name="Count" extends="org.apache.hadoop.fs.shell.Command"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Count" type="java.lang.String[], int, org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param cmd the count command
+ @param pos the starting index of the arguments
+ @param fs the file system handler]]>
+ </doc>
+ </constructor>
+ <method name="matches" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Check if a command is the count command
+
+ @param cmd A string representation of a command starting with "-"
+ @return true if this is a count command; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USAGE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DESCRIPTION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Count the number of directories, files, bytes, quota, and remaining quota.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Count -->
+</package>
+<package name="org.apache.hadoop.io">
+ <!-- start class org.apache.hadoop.io.AbstractMapWritable -->
+ <class name="AbstractMapWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="AbstractMapWritable"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="addToMap"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a Class to the maps if it is not already present.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="byte"/>
+ <doc>
+      <![CDATA[@return the Class for the specified id]]>
+ </doc>
+ </method>
+ <method name="getId" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[@return the id for the specified Class]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Used by child copy constructors.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the conf]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@param conf the conf to set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract base class for MapWritable and SortedMapWritable
+
+ Unlike org.apache.nutch.crawl.MapWritable, this class allows creation of
+ MapWritable&lt;Writable, MapWritable&gt; so the CLASS_TO_ID and ID_TO_CLASS
+ maps travel with the class instead of being static.
+
+ Class ids range from 1 to 127 so there can be at most 127 distinct classes
+ in any specific map instance.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.AbstractMapWritable -->
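+  <!-- A short sketch of how a concrete subclass such as MapWritable uses the
+       machinery above (illustrative; MapWritable is a standard subclass that
+       implements java.util.Map). Each distinct key/value class used in one
+       instance consumes one of the 127 available ids.
+
+  import org.apache.hadoop.io.IntWritable;
+  import org.apache.hadoop.io.MapWritable;
+  import org.apache.hadoop.io.Text;
+
+  MapWritable map = new MapWritable();  // the id maps travel with this instance
+  map.put(new Text("visits"), new IntWritable(42));
+  IntWritable v = (IntWritable) map.get(new Text("visits"));
+  -->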
+ <!-- start class org.apache.hadoop.io.ArrayFile -->
+ <class name="ArrayFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A dense file-based mapping from integers to values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Reader -->
+ <class name="ArrayFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an array reader for the named file.]]>
+ </doc>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader before its <code>n</code>th value.]]>
+ </doc>
+ </method>
+ <method name="next" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read and return the next value in the file.]]>
+ </doc>
+ </method>
+ <method name="key" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the key associated with the most recent call to {@link
+ #seek(long)}, {@link #next(Writable)}, or {@link
+ #get(long,Writable)}.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the <code>n</code>th value in the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Reader -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Writer -->
+ <class name="ArrayFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a value to the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Writer -->
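+  <!-- A combined write/read sketch for ArrayFile (illustrative; the path is
+       invented, and conf/fs are assumed to be a live Configuration and
+       FileSystem; IOException handling is omitted).
+
+  import org.apache.hadoop.conf.Configuration;
+  import org.apache.hadoop.fs.FileSystem;
+  import org.apache.hadoop.io.ArrayFile;
+  import org.apache.hadoop.io.IntWritable;
+
+  Configuration conf = new Configuration();
+  FileSystem fs = FileSystem.get(conf);
+
+  ArrayFile.Writer writer =
+      new ArrayFile.Writer(conf, fs, "/tmp/demo.array", IntWritable.class);
+  writer.append(new IntWritable(7));  // values are keyed 0, 1, 2, ... in order
+  writer.close();
+
+  ArrayFile.Reader reader = new ArrayFile.Reader(fs, "/tmp/demo.array", conf);
+  IntWritable value = new IntWritable();
+  reader.get(0, value);               // positional lookup by index
+  reader.close();
+  -->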
+ <!-- start class org.apache.hadoop.io.ArrayWritable -->
+ <class name="ArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for arrays containing instances of a class. The elements of this
+ writable must all be instances of the same class. If this writable will be
+ the input for a Reducer, you will need to create a subclass that sets the
+ value to be of the proper type.
+
+ For example:
+ <code>
+ public class IntArrayWritable extends ArrayWritable {
+ public IntArrayWritable() {
+ super(IntWritable.class);
+ }
+ }
+ </code>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayWritable -->
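+  <!-- Continuing the IntArrayWritable subclass from the doc above, a usage
+       sketch (illustrative):
+
+  IntArrayWritable arr = new IntArrayWritable();
+  arr.set(new Writable[] { new IntWritable(1), new IntWritable(2) });
+  Writable[] values = arr.get();       // the wrapped elements
+  String[] asText = arr.toStrings();   // e.g. {"1", "2"}
+  -->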
+ <!-- start class org.apache.hadoop.io.BooleanWritable -->
+ <class name="BooleanWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BooleanWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BooleanWritable" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="get" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for booleans.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable -->
+ <!-- start class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <class name="BooleanWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BooleanWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BooleanWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.BytesWritable -->
+ <class name="BytesWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-size sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="BytesWritable" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a BytesWritable using the byte array as the initial value.
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the data from the BytesWritable.
+      @return the backing byte array; only positions 0 through getSize() - 1 are valid.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current size of the buffer.]]>
+ </doc>
+ </method>
+ <method name="setSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Change the size of the buffer. The values in the old range are preserved
+ and any new values are undefined. The capacity is changed if it is
+ necessary.
+ @param size The new number of bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the capacity, which is the maximum size that could be handled without
+ resizing the backing storage.
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_cap" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved.
+ @param new_cap The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="org.apache.hadoop.io.BytesWritable"/>
+ <doc>
+ <![CDATA[Set the BytesWritable to the contents of the given newData.
+ @param newData the value to set this BytesWritable to.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Set the value to a copy of the given byte range
+ @param newData the new values to copy in
+ @param offset the offset in newData to start at
+ @param length the number of bytes to copy]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Define the sort order of the BytesWritable.
+ @param right_obj The other bytes writable
+ @return Positive if left is bigger than right, 0 if they are equal, and
+ negative if left is smaller than right.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Are the two byte sequences equal?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generate the stream of bytes as hex pairs separated by ' '.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is usable as a key or value.
+       It is resizable and distinguishes between the size of the sequence and
+       the current capacity. The hash function is the leading bytes of the MD5 of the
+ buffer. The sort order is the same as memcmp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable -->
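+  <!-- A small sketch of the sizing semantics described above (illustrative):
+
+  byte[] raw = {1, 2, 3, 4};
+  BytesWritable bw = new BytesWritable();
+  bw.set(raw, 0, raw.length);   // copies the range; raw stays independent
+  bw.setSize(2);                // shrink: only bytes 0 and 1 remain valid
+  int cap = bw.getCapacity();   // backing storage may exceed getSize()
+  -->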
+ <!-- start class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <class name="BytesWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BytesWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BytesWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ByteWritable -->
+ <class name="ByteWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="ByteWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ByteWritable" type="byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Set the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a ByteWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two ByteWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for a single byte.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable -->
+ <!-- start class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <class name="ByteWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ByteWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for ByteWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <!-- start interface org.apache.hadoop.io.Closeable -->
+ <interface name="Closeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="use java.io.Closeable">
+ <implements name="java.io.Closeable"/>
+ <doc>
+ <![CDATA[@deprecated use java.io.Closeable]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Closeable -->
+ <!-- start class org.apache.hadoop.io.CompressedWritable -->
+ <class name="CompressedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="CompressedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ensureInflated"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Must be called by all methods which access fields to ensure that the data
+ has been uncompressed.]]>
+ </doc>
+ </method>
+ <method name="readFieldsCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #readFields(DataInput)}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #write(DataOutput)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base-class for Writables which store themselves compressed and lazily
+ inflate on field access. This is useful for large objects whose fields are
+       not altered during a map or reduce operation: leaving the field data
+ compressed makes copying the instance from one file to another much
+ faster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.CompressedWritable -->
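+  <!-- A minimal subclass sketch of the lazy-inflation contract (the class
+       name "BigRecord" and its field are hypothetical):
+
+  import java.io.DataInput;
+  import java.io.DataOutput;
+  import java.io.IOException;
+  import org.apache.hadoop.io.CompressedWritable;
+
+  public class BigRecord extends CompressedWritable {
+    private String payload = "";
+
+    protected void readFieldsCompressed(DataInput in) throws IOException {
+      payload = in.readUTF();  // runs lazily, on first field access
+    }
+
+    protected void writeCompressed(DataOutput out) throws IOException {
+      out.writeUTF(payload);
+    }
+
+    public String getPayload() {
+      ensureInflated();        // required before touching any field
+      return payload;
+    }
+  }
+  -->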
+ <!-- start class org.apache.hadoop.io.DataInputBuffer -->
+ <class name="DataInputBuffer" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataInputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataInput} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataInputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataInputBuffer buffer = new DataInputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using DataInput methods ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataInputBuffer -->
+ <!-- start class org.apache.hadoop.io.DataOutputBuffer -->
+ <class name="DataOutputBuffer" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataOutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <constructor name="DataOutputBuffer" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from a DataInput directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataOutput} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataOutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataOutputBuffer buffer = new DataOutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using DataOutput methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataOutputBuffer -->
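+  <!-- A concrete round trip through DataOutputBuffer and DataInputBuffer
+       (illustrative; IOException handling is omitted):
+
+  import org.apache.hadoop.io.DataInputBuffer;
+  import org.apache.hadoop.io.DataOutputBuffer;
+
+  DataOutputBuffer out = new DataOutputBuffer();
+  out.writeInt(42);                          // standard DataOutput methods
+  out.writeUTF("hello");
+
+  DataInputBuffer in = new DataInputBuffer();
+  in.reset(out.getData(), out.getLength());  // read back what was written
+  int n = in.readInt();                      // 42
+  String s = in.readUTF();                   // "hello"
+  -->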
+ <!-- start class org.apache.hadoop.io.DefaultStringifier -->
+ <class name="DefaultStringifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Stringifier&lt;T&gt;"/>
+ <constructor name="DefaultStringifier" type="org.apache.hadoop.conf.Configuration, java.lang.Class&lt;T&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="store"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="item" type="K"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the item in the configuration with the given keyName.
+
+ @param <K> the class of the item
+      @param conf the configuration to store the item in
+      @param item the object to be stored
+      @param keyName the name of the key to use
+      @throws IOException forwards exceptions from the underlying
+      {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="load" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+      @return the restored object
+      @throws IOException forwards exceptions from the underlying
+      {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="storeArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="items" type="K[]"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the array of items in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param items the objects to be stored
+ @param keyName the name of the key to use
+ @throws IndexOutOfBoundsException if the items array is empty
+      @throws IOException forwards exceptions from the underlying
+      {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="loadArray" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the array of objects from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+      @return the restored array of objects
+      @throws IOException forwards exceptions from the underlying
+      {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[DefaultStringifier is the default implementation of the {@link Stringifier}
+ interface which stringifies the objects using base64 encoding of the
+ serialized version of the objects. The {@link Serializer} and
+ {@link Deserializer} are obtained from the {@link SerializationFactory}.
+ <br>
+ DefaultStringifier offers convenience methods to store/load objects to/from
+ the configuration.
+
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DefaultStringifier -->
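+  <!-- A store/load sketch for the convenience methods above (illustrative;
+       the key name is invented, and both calls throw IOException):
+
+  import org.apache.hadoop.conf.Configuration;
+  import org.apache.hadoop.io.DefaultStringifier;
+  import org.apache.hadoop.io.Text;
+
+  Configuration conf = new Configuration();
+  DefaultStringifier.store(conf, new Text("payload"), "my.key");
+  Text restored = DefaultStringifier.load(conf, "my.key", Text.class);
+  -->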
+ <!-- start class org.apache.hadoop.io.DoubleWritable -->
+ <class name="DoubleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="DoubleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DoubleWritable" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="double"/>
+ </method>
+ <method name="get" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a DoubleWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Writable for Double values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable -->
+ <!-- start class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <class name="DoubleWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DoubleWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for DoubleWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.FloatWritable -->
+ <class name="FloatWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="FloatWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FloatWritable" type="float"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="float"/>
+ <doc>
+ <![CDATA[Set the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a FloatWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two FloatWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for floats.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable -->
+ <!-- start class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <class name="FloatWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FloatWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for FloatWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.GenericWritable -->
+ <class name="GenericWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="GenericWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+      <![CDATA[Set the instance that is wrapped.
+
+      @param obj the Writable instance to wrap]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the wrapped instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTypes" return="java.lang.Class[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return all classes that may be wrapped. Subclasses should implement this
+ to return a constant array of classes.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper for Writable instances.
+ <p>
+       When two sequence files that have the same Key type but different Value
+       types are mapped out to reduce, multiple Value types are not allowed.
+       In this case, this class can help you wrap instances of the different types.
+ </p>
+
+ <p>
+       Compared with <code>ObjectWritable</code>, this class is much more efficient,
+       because <code>ObjectWritable</code> appends the class declaration as a String
+       to the output file in every Key-Value pair.
+ </p>
+
+ <p>
+ Generic Writable implements {@link Configurable} interface, so that it will be
+ configured by the framework. The configuration is passed to the wrapped objects
+ implementing {@link Configurable} interface <i>before deserialization</i>.
+ </p>
+
+       How to use it: <br>
+       1. Write your own class, such as GenericObject, which extends GenericWritable.<br>
+       2. Implement the abstract method <code>getTypes()</code> to define
+       the classes that will be wrapped in GenericObject in your application.
+       Note: the classes defined in <code>getTypes()</code> must
+       implement the <code>Writable</code> interface.
+ <br><br>
+
+ The code looks like this:
+ <blockquote><pre>
+ public class GenericObject extends GenericWritable {
+
+ private static Class[] CLASSES = {
+ ClassType1.class,
+ ClassType2.class,
+ ClassType3.class,
+ };
+
+ protected Class[] getTypes() {
+ return CLASSES;
+ }
+
+ }
+ </pre></blockquote>
+
+ @since Nov 8, 2006]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.GenericWritable -->
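+  <!-- Continuing the GenericObject example above, a usage sketch
+       (illustrative; ClassType1 is the placeholder type from the doc):
+
+  GenericObject holder = new GenericObject();
+  holder.set(new ClassType1());   // wrap any type listed in getTypes()
+  Writable inner = holder.get();  // unwrap after deserialization
+  -->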
+ <!-- start class org.apache.hadoop.io.InputBuffer -->
+ <class name="InputBuffer" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link InputStream} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new InputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ InputBuffer buffer = new InputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using InputStream methods ...
+ }
+ </pre>
+ @see DataInputBuffer
+ @see DataOutput]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.InputBuffer -->
+ <!-- start class org.apache.hadoop.io.IntWritable -->
+ <class name="IntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="IntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="IntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Returns true iff <code>o</code> is an IntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two IntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for ints.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable -->
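+  <!-- A minimal usage sketch of the Writable contract documented above:
+  write() and readFields() round-trip an IntWritable through plain java.io
+  streams; no cluster or filesystem is assumed.
+
+  import java.io.ByteArrayInputStream;
+  import java.io.ByteArrayOutputStream;
+  import java.io.DataInputStream;
+  import java.io.DataOutputStream;
+  import org.apache.hadoop.io.IntWritable;
+
+  public class IntWritableRoundTrip {
+    public static void main(String[] args) throws Exception {
+      IntWritable original = new IntWritable(42);
+
+      // write() serializes the value to any DataOutput
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      original.write(new DataOutputStream(bytes));
+
+      // readFields() restores it from any DataInput
+      IntWritable copy = new IntWritable();
+      copy.readFields(new DataInputStream(
+          new ByteArrayInputStream(bytes.toByteArray())));
+
+      System.out.println(copy.get());               // 42
+      System.out.println(original.equals(copy));    // true
+      System.out.println(original.compareTo(copy)); // 0
+    }
+  }
+  -->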
+ <!-- start class org.apache.hadoop.io.IntWritable.Comparator -->
+ <class name="IntWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IntWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for IntWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.IOUtils -->
+ <class name="IOUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="buffSize" type="int"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param buffSize the size of the buffer
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another. <strong>closes the input and output streams
+ at the end</strong>.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads len bytes in a loop.
+ @param in The InputStream to read from
+ @param buf The buffer to fill
+ @param off offset into the buffer
+ @param len the length of bytes to read
+ @throws IOException if it could not read requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Similar to readFully(). Skips bytes in a loop.
+ @param in The InputStream to skip bytes from
+ @param len number of bytes to skip.
+ @throws IOException if it could not skip requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="closeables" type="java.io.Closeable[]"/>
+ <doc>
+ <![CDATA[Close the Closeable objects and <b>ignore</b> any {@link IOException} or
+ null pointers. Must only be used for cleanup in exception handlers.
+ @param log the log to record problems to at debug level. Can be null.
+ @param closeables the objects to close]]>
+ </doc>
+ </method>
+ <method name="closeStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Closeable"/>
+ <doc>
+ <![CDATA[Closes the stream ignoring {@link IOException}.
+ Must only be used for cleanup in exception handlers.
+ @param stream the Stream to close]]>
+ </doc>
+ </method>
+ <method name="closeSocket"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <doc>
+      <![CDATA[Closes the socket ignoring {@link IOException}.
+ @param sock the Socket to close]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[A utility class for I/O-related functionality.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils -->
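+  <!-- A minimal usage sketch for copyBytes() and cleanup(); the file names
+  here are hypothetical. With close=true, copyBytes closes both streams in
+  its own finally clause; cleanup() is only for exception handlers and
+  swallows IOExceptions.
+
+  import java.io.FileInputStream;
+  import java.io.FileOutputStream;
+  import org.apache.hadoop.io.IOUtils;
+
+  public class CopyFileExample {
+    public static void main(String[] args) throws Exception {
+      FileInputStream in = new FileInputStream("input.dat");     // hypothetical
+      FileOutputStream out = new FileOutputStream("output.dat"); // hypothetical
+      try {
+        IOUtils.copyBytes(in, out, 4096, true); // 4096-byte buffer, then close both
+      } catch (Exception e) {
+        IOUtils.cleanup(null, in, out); // best-effort close, errors ignored
+        throw e;
+      }
+    }
+  }
+  -->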
+ <!-- start class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <class name="IOUtils.NullOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils.NullOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[/dev/null of OutputStreams.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <!-- start class org.apache.hadoop.io.LongWritable -->
+ <class name="LongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="LongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a LongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two LongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable -->
+ <!-- start class org.apache.hadoop.io.LongWritable.Comparator -->
+ <class name="LongWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <class name="LongWritable.DecreasingComparator" extends="org.apache.hadoop.io.LongWritable.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.DecreasingComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A decreasing Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
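+  <!-- A small sketch of the decreasing order: where the natural comparator
+  would return a negative value for (1, 2), DecreasingComparator flips the
+  sign, which is handy for largest-key-first sorting.
+
+  import org.apache.hadoop.io.LongWritable;
+
+  public class DecreasingOrderExample {
+    public static void main(String[] args) {
+      LongWritable.DecreasingComparator cmp =
+          new LongWritable.DecreasingComparator();
+      System.out.println(cmp.compare(new LongWritable(1),
+                                     new LongWritable(2)) > 0); // true
+    }
+  }
+  -->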
+ <!-- start class org.apache.hadoop.io.MapFile -->
+ <class name="MapFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="oldName" type="java.lang.String"/>
+ <param name="newName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames an existing map directory.]]>
+ </doc>
+ </method>
+ <method name="delete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deletes the named map file.]]>
+ </doc>
+ </method>
+ <method name="fix" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valueClass" type="java.lang.Class"/>
+ <param name="dryrun" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This method attempts to fix a corrupt MapFile by re-creating its index.
+ @param fs filesystem
+ @param dir directory containing the MapFile data and index
+ @param keyClass key class (has to be a subclass of Writable)
+ @param valueClass value class (has to be a subclass of Writable)
+ @param dryrun do not perform any changes, just report what needs to be done
+ @return number of valid entries in this MapFile, or -1 if no fixing was needed
+ @throws Exception]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="INDEX_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the index file.]]>
+ </doc>
+ </field>
+ <field name="DATA_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the data file.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A file-based map from keys to values.
+
+ <p>A map is a directory containing two files, the <code>data</code> file,
+ containing all keys and values in the map, and a smaller <code>index</code>
+ file, containing a fraction of the keys. The fraction is determined by
+ {@link Writer#getIndexInterval()}.
+
+ <p>The index file is read entirely into memory. Thus key implementations
+ should try to keep themselves small.
+
+ <p>Map files are created by adding entries in-order. To maintain a large
+ database, perform updates by copying the previous version of a database and
+ merging in a sorted change list, to create a new version of the database in
+ a new file. Sorting large change lists can be done with {@link
+ SequenceFile.Sorter}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile -->
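+  <!-- A sketch of the on-disk layout described above, using the local
+  filesystem and a hypothetical directory name "map.dir": once a writer
+  closes, the directory holds the data and index files named by the
+  public constants.
+
+  import org.apache.hadoop.conf.Configuration;
+  import org.apache.hadoop.fs.FileSystem;
+  import org.apache.hadoop.fs.Path;
+  import org.apache.hadoop.io.IntWritable;
+  import org.apache.hadoop.io.MapFile;
+  import org.apache.hadoop.io.Text;
+
+  public class MapFileLayout {
+    public static void main(String[] args) throws Exception {
+      Configuration conf = new Configuration();
+      FileSystem fs = FileSystem.getLocal(conf);
+
+      MapFile.Writer writer =
+          new MapFile.Writer(conf, fs, "map.dir", IntWritable.class, Text.class);
+      writer.append(new IntWritable(1), new Text("one"));
+      writer.close();
+
+      // The map directory now contains the two files named by the constants.
+      System.out.println(fs.exists(new Path("map.dir", MapFile.DATA_FILE_NAME)));  // true
+      System.out.println(fs.exists(new Path("map.dir", MapFile.INDEX_FILE_NAME))); // true
+    }
+  }
+  -->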
+ <!-- start class org.apache.hadoop.io.MapFile.Reader -->
+ <class name="MapFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map using the named comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Hook to allow subclasses to defer opening streams until further
+ initialization is complete.
+ @see #createDataFileReader(FileSystem, Path, Configuration)]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="open"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dirName" type="java.lang.String"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDataFileReader" return="org.apache.hadoop.io.SequenceFile.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dataFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link SequenceFile.Reader} returned.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Re-positions the reader before its first key.]]>
+ </doc>
+ </method>
+ <method name="midKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the key at approximately the middle of the file.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalKey"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the final key from the file.
+
+ @param key key to read into]]>
+ </doc>
+ </method>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader at the named key, or if none such exists, at the
+ first entry after the named key. Returns true iff the named key exists
+ in this map.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the map into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ the end of the map.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the value for the named key, or null if none exists.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+ Returns <code>key</code> or, if it does not exist, the first entry
+ after the named key.
+
+ @param key - key that we're trying to find
+ @param val - data value if key is found
+ @return - the key that was the closest match or null if eof.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <param name="before" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+
+ @param key - key that we're trying to find
+ @param val - data value if key is found
+ @param before - If true, and <code>key</code> does not exist, return
+ the first entry that falls just before the <code>key</code>. Otherwise,
+ return the record that sorts just after.
+ @return - the key that was the closest match or null if eof.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Reader -->
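+  <!-- A sketch of reading back the hypothetical "map.dir" written above:
+  get() is a random lookup against the in-memory index, while next() scans
+  in key order after reset().
+
+  import org.apache.hadoop.conf.Configuration;
+  import org.apache.hadoop.fs.FileSystem;
+  import org.apache.hadoop.io.IntWritable;
+  import org.apache.hadoop.io.MapFile;
+  import org.apache.hadoop.io.Text;
+
+  public class MapFileReadExample {
+    public static void main(String[] args) throws Exception {
+      Configuration conf = new Configuration();
+      FileSystem fs = FileSystem.getLocal(conf);
+      MapFile.Reader reader = new MapFile.Reader(fs, "map.dir", conf);
+      try {
+        Text val = new Text();
+        // Random access: fills val and returns it, or null if the key is absent.
+        if (reader.get(new IntWritable(1), val) != null) {
+          System.out.println(val);
+        }
+        // Sequential scan from before the first key.
+        reader.reset();
+        IntWritable key = new IntWritable();
+        while (reader.next(key, val)) {
+          System.out.println(key + "\t" + val);
+        }
+      } finally {
+        reader.close();
+      }
+    }
+  }
+  -->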
+ <!-- start class org.apache.hadoop.io.MapFile.Writer -->
+ <class name="MapFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <method name="getIndexInterval" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of entries that are added before an index entry is added.]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="interval" type="int"/>
+ <doc>
+      <![CDATA[Sets the index interval and stores it in conf.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Append a key/value pair to the map. The key must be greater than or equal
+ to the previous key added to the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writes a new map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Writer -->
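+  <!-- A sketch of the append-ordering rule stated above: keys must arrive
+  in non-decreasing order, and an out-of-order append fails with an
+  IOException. The directory name is hypothetical.
+
+  import java.io.IOException;
+  import org.apache.hadoop.conf.Configuration;
+  import org.apache.hadoop.fs.FileSystem;
+  import org.apache.hadoop.io.IntWritable;
+  import org.apache.hadoop.io.MapFile;
+  import org.apache.hadoop.io.Text;
+
+  public class OrderedAppendExample {
+    public static void main(String[] args) throws Exception {
+      Configuration conf = new Configuration();
+      FileSystem fs = FileSystem.getLocal(conf);
+      MapFile.Writer writer =
+          new MapFile.Writer(conf, fs, "ordered.dir", IntWritable.class, Text.class);
+      writer.append(new IntWritable(1), new Text("one"));
+      writer.append(new IntWritable(2), new Text("two")); // ok: 2 >= 1
+      try {
+        writer.append(new IntWritable(0), new Text("zero")); // out of order
+      } catch (IOException expected) {
+        System.out.println("rejected: " + expected.getMessage());
+      }
+      writer.close();
+    }
+  }
+  -->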
+ <!-- start class org.apache.hadoop.io.MapWritable -->
+ <class name="MapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Map&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapWritable" type="org.apache.hadoop.io.MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.Writable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable Map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapWritable -->
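+  <!-- A sketch of MapWritable as a serializable java.util.Map: each entry
+  carries its own Writable types, so a heterogeneous map round-trips
+  through write()/readFields().
+
+  import java.io.ByteArrayInputStream;
+  import java.io.ByteArrayOutputStream;
+  import java.io.DataInputStream;
+  import java.io.DataOutputStream;
+  import org.apache.hadoop.io.IntWritable;
+  import org.apache.hadoop.io.MapWritable;
+  import org.apache.hadoop.io.Text;
+
+  public class MapWritableRoundTrip {
+    public static void main(String[] args) throws Exception {
+      MapWritable map = new MapWritable();
+      map.put(new Text("answer"), new IntWritable(42));
+
+      // Serialize the whole map.
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      map.write(new DataOutputStream(bytes));
+
+      // Deserialize into a fresh instance and look the entry up again.
+      MapWritable copy = new MapWritable();
+      copy.readFields(new DataInputStream(
+          new ByteArrayInputStream(bytes.toByteArray())));
+      System.out.println(copy.get(new Text("answer"))); // 42
+    }
+  }
+  -->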
+ <!-- start class org.apache.hadoop.io.MD5Hash -->
+ <class name="MD5Hash" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash from a hex string.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash with a specified value.]]>
+ </doc>
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs, reads and returns an instance.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Copy the contents of another instance into this instance.]]>
+ </doc>
+ </method>
+ <method name="getDigest" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the digest bytes.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="halfDigest" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Construct a half-sized version of this MD5. Fits in a long.]]>
+ </doc>
+ </method>
+ <method name="quarterDigest" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a 32-bit digest of the MD5.
+ @return the first 4 bytes of the md5]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an MD5Hash whose digest contains the
+ same values.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for this object.
+ Only uses the first 4 bytes, since md5s are evenly distributed.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares this object with the specified object for order.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="setDigest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the digest value from a hex string.]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Writable for MD5 hash values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash -->
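+  <!-- A sketch of the digest helpers above: equals() compares the full
+  digest bytes, while quarterDigest() and halfDigest() shrink the hash to
+  an int or a long.
+
+  import org.apache.hadoop.io.MD5Hash;
+
+  public class MD5HashExample {
+    public static void main(String[] args) {
+      MD5Hash a = MD5Hash.digest("hello");
+      MD5Hash b = new MD5Hash(a.toString()); // round-trip via the hex string
+
+      System.out.println(a.equals(b));       // true: same digest bytes
+      System.out.println(a.compareTo(b));    // 0
+      System.out.println(a.quarterDigest()); // first 4 bytes as an int
+      System.out.println(a.halfDigest());    // half-sized digest as a long
+    }
+  }
+  -->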
+ <!-- start class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <class name="MD5Hash.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5Hash.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for MD5Hash keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <!-- start class org.apache.hadoop.io.MultipleIOException -->
+ <class name="MultipleIOException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getExceptions" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the underlying exceptions]]>
+ </doc>
+ </method>
+ <method name="createIOException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="exceptions" type="java.util.List&lt;java.io.IOException&gt;"/>
+ <doc>
+ <![CDATA[A convenient method to create an {@link IOException}.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Encapsulates a list of {@link IOException}s in a single {@link IOException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MultipleIOException -->
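+  <!-- A sketch of collecting failures from several close() calls into one
+  exception via createIOException(); the closeAll helper below is
+  illustrative, not part of this API.
+
+  import java.io.Closeable;
+  import java.io.IOException;
+  import java.util.ArrayList;
+  import java.util.List;
+  import org.apache.hadoop.io.MultipleIOException;
+
+  public class CloseAllExample {
+    public static void closeAll(List<Closeable> resources) throws IOException {
+      List<IOException> errors = new ArrayList<IOException>();
+      for (Closeable c : resources) {
+        try {
+          c.close();
+        } catch (IOException e) {
+          errors.add(e); // keep closing the rest, remember the failure
+        }
+      }
+      if (!errors.isEmpty()) {
+        // A single underlying exception, or a MultipleIOException wrapping all.
+        throw MultipleIOException.createIOException(errors);
+      }
+    }
+  }
+  -->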
+ <!-- start class org.apache.hadoop.io.NullWritable -->
+ <class name="NullWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <method name="get" return="org.apache.hadoop.io.NullWritable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the single instance of this class.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Singleton Writable with no data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable -->
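+  <!-- A sketch of the "no data" contract above: the singleton from get()
+  writes zero bytes, which makes it a cheap placeholder key or value.
+
+  import java.io.ByteArrayOutputStream;
+  import java.io.DataOutputStream;
+  import org.apache.hadoop.io.NullWritable;
+
+  public class NullWritableExample {
+    public static void main(String[] args) throws Exception {
+      NullWritable nothing = NullWritable.get(); // the single shared instance
+
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      nothing.write(new DataOutputStream(bytes));
+      System.out.println(bytes.size()); // 0: nothing is serialized
+    }
+  }
+  -->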
+ <!-- start class org.apache.hadoop.io.NullWritable.Comparator -->
+ <class name="NullWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator &quot;optimized&quot; for NullWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ObjectWritable -->
+ <class name="ObjectWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ObjectWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Class, java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the instance, or null if none.]]>
+ </doc>
+ </method>
+ <method name="getDeclaredClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return the declared class of the wrapped instance.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Reset the instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeObject"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="instance" type="java.lang.Object"/>
+ <param name="declaredClass" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="objectWritable" type="org.apache.hadoop.io.ObjectWritable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+      <![CDATA[A polymorphic Writable that writes an instance with its class name.
+ Handles arrays, strings and primitive types without a Writable wrapper.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ObjectWritable -->
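+  <!-- A sketch of the polymorphic writeObject()/readObject() pair: the
+  declared class travels with each value, so Strings and primitives need
+  no Writable wrapper.
+
+  import java.io.ByteArrayInputStream;
+  import java.io.ByteArrayOutputStream;
+  import java.io.DataInputStream;
+  import java.io.DataOutputStream;
+  import org.apache.hadoop.conf.Configuration;
+  import org.apache.hadoop.io.ObjectWritable;
+
+  public class ObjectWritableExample {
+    public static void main(String[] args) throws Exception {
+      Configuration conf = new Configuration();
+
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      DataOutputStream out = new DataOutputStream(bytes);
+      ObjectWritable.writeObject(out, "hello", String.class, conf);
+      ObjectWritable.writeObject(out, Integer.valueOf(7), int.class, conf);
+
+      DataInputStream in = new DataInputStream(
+          new ByteArrayInputStream(bytes.toByteArray()));
+      System.out.println(ObjectWritable.readObject(in, conf)); // hello
+      System.out.println(ObjectWritable.readObject(in, conf)); // 7
+    }
+  }
+  -->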
+ <!-- start class org.apache.hadoop.io.OutputBuffer -->
+ <class name="OutputBuffer" extends="java.io.FilterOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.OutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Writes bytes from an InputStream directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link OutputStream} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new OutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ OutputBuffer buffer = new OutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using OutputStream methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>
+ @see DataOutputBuffer
+ @see InputBuffer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.OutputBuffer -->
+ <!-- start interface org.apache.hadoop.io.RawComparator -->
+ <interface name="RawComparator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Comparator&lt;T&gt;"/>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link Comparator} that operates directly on byte representations of
+ objects.
+ </p>
+ @param <T>
+ @see DeserializerComparator]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.RawComparator -->
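+  <!-- A sketch of implementing the interface for 4-byte big-endian ints
+  (IntWritable's wire format), comparing serialized bytes without
+  deserializing anything; the class name is illustrative.
+
+  import org.apache.hadoop.io.IntWritable;
+  import org.apache.hadoop.io.RawComparator;
+  import org.apache.hadoop.io.WritableComparator;
+
+  public class RawIntComparator implements RawComparator<IntWritable> {
+    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
+      // WritableComparator.readInt decodes 4 big-endian bytes in place.
+      int a = WritableComparator.readInt(b1, s1);
+      int b = WritableComparator.readInt(b2, s2);
+      return (a < b) ? -1 : ((a == b) ? 0 : 1);
+    }
+    public int compare(IntWritable a, IntWritable b) { // the Comparator<T> half
+      return a.compareTo(b);
+    }
+  }
+  -->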
+ <!-- start class org.apache.hadoop.io.SequenceFile -->
+ <class name="SequenceFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()}
+ to get {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the compression type for the reduce outputs
+ @param job the job config to look in
+ @return the kind of compression to use
+ @deprecated Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()}
+ to get {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use the one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the compression type for sequence files.
+ @param job the configuration to modify
+ @param val the new compression type (none, block, record)
+ @deprecated Use one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile},
+ {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for intermediate map-outputs, or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param bufferSize buffer size for the underlying output stream.
+ @param replication replication factor for the file.
+ @param blockSize block size for the file.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top of which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top of which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="SYNC_INTERVAL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes between sync points.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<code>SequenceFile</code>s are flat files consisting of binary key/value
+ pairs.
+
+ <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
+ {@link Sorter} classes for writing, reading and sorting respectively.</p>
+
+ There are three <code>SequenceFile</code> <code>Writer</code>s based on the
+ {@link CompressionType} used to compress key/value pairs:
+ <ol>
+ <li>
+ <code>Writer</code> : Uncompressed records.
+ </li>
+ <li>
+ <code>RecordCompressWriter</code> : Record-compressed files, only compress
+ values.
+ </li>
+ <li>
+ <code>BlockCompressWriter</code> : Block-compressed files, both keys &
+ values are collected in 'blocks'
+ separately and compressed. The size of
+ the 'block' is configurable.
+ </li>
+ </ol>
+
+ <p>The actual compression algorithm used to compress keys and/or values can be
+ specified by using the appropriate {@link CompressionCodec}.</p>
+
+ <p>The recommended way is to use the static <tt>createWriter</tt> methods
+ provided by the <code>SequenceFile</code> to choose the preferred format.</p>
+
+ <p>The {@link Reader} acts as the bridge and can read any of the above
+ <code>SequenceFile</code> formats.</p>
+
+ <h4 id="Formats">SequenceFile Formats</h4>
+
+ <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
+ depending on the <code>CompressionType</code> specified. All of them share a
+ <a href="#Header">common header</a> described below.
+
+ <h5 id="Header">SequenceFile Header</h5>
+ <ul>
+ <li>
+ version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual
+ version number (e.g. SEQ4 or SEQ6)
+ </li>
+ <li>
+ keyClassName - key class
+ </li>
+ <li>
+ valueClassName - value class
+ </li>
+ <li>
+ compression - A boolean which specifies if compression is turned on for
+ keys/values in this file.
+ </li>
+ <li>
+ blockCompression - A boolean which specifies if block-compression is
+ turned on for keys/values in this file.
+ </li>
+ <li>
+ compression codec - <code>CompressionCodec</code> class which is used for
+ compression of keys and/or values (if compression is
+ enabled).
+ </li>
+ <li>
+ metadata - {@link Metadata} for this file.
+ </li>
+ <li>
+ sync - A sync marker to denote end of the header.
+ </li>
+ </ul>
+
+ <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li>Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li><i>Compressed</i> Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record <i>Block</i>
+ <ul>
+ <li>Compressed key-lengths block-size</li>
+ <li>Compressed key-lengths block</li>
+ <li>Compressed keys block-size</li>
+ <li>Compressed keys block</li>
+ <li>Compressed value-lengths block-size</li>
+ <li>Compressed value-lengths block</li>
+ <li>Compressed values block-size</li>
+ <li>Compressed values block</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <p>The compressed blocks of key lengths and value lengths consist of the
+ actual lengths of individual keys/values encoded in ZeroCompressedInteger
+ format.</p>
+
+ @see CompressionCodec]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile -->
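+ <!-- A minimal end-to-end sketch of the createWriter/Reader APIs described
+ above, assuming IntWritable/Text records; the file name "example.seq" is
+ illustrative.
+
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.get(conf);
+ Path name = new Path("example.seq");
+
+ SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, name,
+     IntWritable.class, Text.class, SequenceFile.CompressionType.BLOCK);
+ try {
+   writer.append(new IntWritable(1), new Text("one"));
+   writer.append(new IntWritable(2), new Text("two"));
+ } finally {
+   writer.close();
+ }
+
+ SequenceFile.Reader reader = new SequenceFile.Reader(fs, name, conf);
+ try {
+   IntWritable key = new IntWritable();
+   Text val = new Text();
+   while (reader.next(key, val)) {   // false at end of file
+     System.out.println(key + "\t" + val);
+   }
+ } finally {
+   reader.close();
+ }
+ -->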
+ <!-- start class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <class name="SequenceFile.CompressionType" extends="java.lang.Enum&lt;org.apache.hadoop.io.SequenceFile.CompressionType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.SequenceFile.CompressionType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression type used to compress key/value pairs in the
+ {@link SequenceFile}.
+
+ @see SequenceFile.Writer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.Metadata -->
+ <class name="SequenceFile.Metadata" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFile.Metadata" type="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="getMetadata" return="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The class encapsulating the metadata of a file.
+ The metadata of a file is a list of attribute name/value
+ pairs of Text type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Metadata -->
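+ <!-- A minimal sketch of attaching Metadata at creation time and reading it
+ back, using the nine-argument createWriter form documented above. The
+ attribute name "created.by" is hypothetical; the codec is
+ org.apache.hadoop.io.compress.DefaultCodec, and a null Progressable
+ (assumed acceptable) means no progress reporting.
+
+ SequenceFile.Metadata meta = new SequenceFile.Metadata();
+ meta.set(new Text("created.by"), new Text("example"));
+
+ SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, name,
+     IntWritable.class, Text.class, SequenceFile.CompressionType.RECORD,
+     new DefaultCodec(), null, meta);
+ writer.close();
+
+ SequenceFile.Reader reader = new SequenceFile.Reader(fs, name, conf);
+ Text createdBy = reader.getMetadata().get(new Text("created.by"));
+ reader.close();
+ -->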
+ <!-- start class org.apache.hadoop.io.SequenceFile.Reader -->
+ <class name="SequenceFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Reader" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the named file.]]>
+ </doc>
+ </constructor>
+ <method name="openFile" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link FSDataInputStream} returned.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the key class.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the value class.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="isCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if values are compressed.]]>
+ </doc>
+ </method>
+ <method name="isBlockCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if records are block-compressed.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="getMetadata" return="org.apache.hadoop.io.SequenceFile.Metadata"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the metadata object of the file]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file into <code>key</code>, skipping its
+ value. Returns true if another entry exists, and false at end of file.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the file into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ end of file.]]>
+ </doc>
+ </method>
+ <method name="next" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.">
+ <param name="buffer" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.]]>
+ </doc>
+ </method>
+ <method name="createValueBytes" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRaw" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' records.
+ @param key - The buffer into which the key is read
+ @param val - The 'raw' value
+ @return Returns the total record length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawKey" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' keys.
+ @param key - The buffer into which the key is read
+ @return Returns the key length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file, skipping its
+ value. Return null at end of file.]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' values.
+ @param val - The 'raw' value
+ @return Returns the value length
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the current byte position in the input file.
+
+ <p>The position passed must be a position returned by {@link
+ SequenceFile.Writer#getLength()} when writing this file. To seek to an arbitrary
+ position, use {@link SequenceFile.Reader#sync(long)}.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the next sync mark past a given position.]]>
+ </doc>
+ </method>
+ <method name="syncSeen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true iff the previous call to next passed a sync mark.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current byte position in the input file.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reads key/value pairs from a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Reader -->
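+ <!-- A sketch of the seek contract documented above: seek(long) only takes
+ positions previously returned by Writer.getLength(); for an arbitrary
+ offset, sync(long) advances to the next sync marker. The offset 8192 is
+ arbitrary and illustrative.
+
+ SequenceFile.Reader reader = new SequenceFile.Reader(fs, name, conf);
+ reader.sync(8192L);                // move to the next sync point past 8192
+ long pos = reader.getPosition();   // now at a record boundary
+ IntWritable key = new IntWritable();
+ Text val = new Text();
+ while (reader.next(key, val)) {
+   if (reader.syncSeen()) {
+     // the previous call to next() crossed a sync mark
+   }
+ }
+ reader.close();
+ -->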
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter -->
+ <class name="SequenceFile.Sorter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge files containing the named classes.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.io.RawComparator, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge using an arbitrary {@link RawComparator}.]]>
+ </doc>
+ </constructor>
+ <method name="setFactor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="factor" type="int"/>
+ <doc>
+ <![CDATA[Set the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="getFactor" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="setMemory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="memory" type="int"/>
+ <doc>
+ <![CDATA[Set the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="getMemory" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setProgressable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progressable" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Set the progressable object in order to report progress.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files into an output file.
+ @param inFiles the files to be sorted
+ @param outFile the sorted output file
+ @param deleteInput should the input files be deleted as they are read?]]>
+ </doc>
+ </method>
+ <method name="sortAndIterate" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files and return an iterator.
+ @param inFiles the files to be sorted
+ @param tempDir the directory where temp files are created during sort
+ @param deleteInput should the input files be deleted as they are read?
+ @return the RawKeyValueIterator
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The backwards compatible interface to sort.
+ @param inFile the input file to sort
+ @param outFile the sorted output file]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="segments" type="java.util.List&lt;org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor&gt;"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the list of segments of type <code>SegmentDescriptor</code>.
+ @param segments the list of SegmentDescriptors
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[] using a max factor value
+ that is already set.
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="factor" type="int"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[].
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param factor the factor that will be used as the maximum merge fan-in
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInputs" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[].
+ @param inNames the array of path names
+ @param tempDir the directory for creating temp files during merge
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cloneFileAttributes" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="prog" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clones the attributes (like compression) of the input file and creates a
+ corresponding Writer.
+ @param inputFile the path of the input file whose attributes should be
+ cloned
+ @param outputFile the path of the output file
+ @param prog the Progressable to report status during the file write
+ @return Writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="records" type="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"/>
+ <param name="writer" type="org.apache.hadoop.io.SequenceFile.Writer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes records from RawKeyValueIterator into a file represented by the
+ passed writer
+ @param records the RawKeyValueIterator
+ @param writer the Writer created earlier
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merge the provided files.
+ @param inFiles the array of input path names
+ @param outFile the final output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sorts key/value pairs in a sequence-format file.
+
+ <p>For best performance, applications should make sure that the {@link
+ Writable#readFields(DataInput)} implementation of their keys is
+ very efficient. In particular, it should avoid allocating memory.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter -->
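+ <!-- A minimal sketch of the Sorter: sort a set of sequence files into one
+ output. All path names are hypothetical.
+
+ SequenceFile.Sorter sorter =
+     new SequenceFile.Sorter(fs, IntWritable.class, Text.class, conf);
+ sorter.setFactor(10);                // merge at most 10 streams at once
+ sorter.setMemory(4 * 1024 * 1024);   // 4 MB of buffer memory
+
+ Path[] inFiles = { new Path("part-0"), new Path("part-1") };
+ sorter.sort(inFiles, new Path("sorted.seq"), false);   // keep the inputs
+ -->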
+ <!-- start interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
+ <interface name="SequenceFile.Sorter.RawKeyValueIterator" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw key
+ @return DataOutputBuffer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getValue" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw value
+ @return ValueBytes
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets up the current key and value (for getKey and getValue)
+ @return true if there exists a key/value, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the iterator so that the underlying streams can be closed.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the Progress object; this has a float (0.0 - 1.0)
+ indicating the bytes processed by the iterator so far]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to iterate over raw keys/values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
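+ <!-- A sketch of draining a RawKeyValueIterator, here one returned by the
+ merge(Path[], boolean, Path) form documented above; getKey()/getValue()
+ are only valid after next() has returned true. The sorter and input paths
+ are as in the Sorter sketch above, and "tmp" is a hypothetical directory.
+
+ SequenceFile.Sorter.RawKeyValueIterator iter =
+     sorter.merge(inFiles, false, new Path("tmp"));
+ while (iter.next()) {
+   DataOutputBuffer rawKey = iter.getKey();
+   SequenceFile.ValueBytes rawVal = iter.getValue();
+   float done = iter.getProgress().get();   // fraction in [0.0, 1.0]
+ }
+ iter.close();   // releases the underlying streams
+ -->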
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <class name="SequenceFile.Sorter.SegmentDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="SequenceFile.Sorter.SegmentDescriptor" type="long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a segment
+ @param segmentOffset the offset of the segment in the file
+ @param segmentLength the length of the segment
+ @param segmentPathName the path name of the file containing the segment]]>
+ </doc>
+ </constructor>
+ <method name="doSync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do the sync checks]]>
+ </doc>
+ </method>
+ <method name="preserveInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="preserve" type="boolean"/>
+ <doc>
+ <![CDATA[Whether to delete the files when no longer needed]]>
+ </doc>
+ </method>
+ <method name="shouldPreserveInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRawKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the rawKey object with the key returned by the Reader
+ @return true if there is a key returned; false, otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rawValue" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the passed rawValue with the value corresponding to the key
+ read earlier
+ @param rawValue the ValueBytes object to be filled with the value
+ @return the length of the value
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the stored rawKey]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The default cleanup. Subclasses can override this with a custom
+ cleanup]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class defines a merge segment. This class can be subclassed to
+ provide a customized cleanup method implementation. In this
+ implementation, cleanup closes the file handle and deletes the file]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <!-- start interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
+ <interface name="SequenceFile.ValueBytes" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the uncompressed bytes to the outStream.
+ @param outStream : Stream to write uncompressed bytes into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to outStream.
+ Note that it will NOT compress the bytes if they are not already compressed.
+ @param outStream : Stream to write compressed bytes into.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Size of stored data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to 'raw' values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
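+ <!-- A sketch pairing Reader.nextRaw with ValueBytes: values are copied to
+ another stream without ever being deserialized. The 'reader' and the 'out'
+ DataOutputStream are hypothetical.
+
+ DataOutputBuffer rawKey = new DataOutputBuffer();
+ SequenceFile.ValueBytes rawVal = reader.createValueBytes();
+ while (reader.nextRaw(rawKey, rawVal) != -1) {   // -1 signals end of file
+   rawVal.writeUncompressedBytes(out);   // copy the raw value bytes
+   rawKey.reset();                       // reuse the key buffer
+ }
+ -->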
+ <!-- start class org.apache.hadoop.io.SequenceFile.Writer -->
+ <class name="SequenceFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, int, short, long, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a sync point.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="appendRaw"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keyData" type="byte[]"/>
+ <param name="keyOffset" type="int"/>
+ <param name="keyLength" type="int"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current length of the output file.
+
+ <p>This always returns a synchronized position. In other words,
+ immediately after calling {@link SequenceFile.Reader#seek(long)} with a position
+ returned by this method, {@link SequenceFile.Reader#next(Writable)} may be called. However
+ the key may be earlier in the file than the key last written when this
+ method was called (e.g., with block-compression, it may be the first key
+ in the block that was being written when this method was called).]]>
+ </doc>
+ </method>
+ <field name="keySerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="uncompressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="compressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Write key/value pairs to a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Writer -->
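+ <!-- A sketch of the getLength() contract documented above: a position
+ recorded from the writer is later a legal argument to Reader.seek. Names
+ and values are illustrative.
+
+ SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, name,
+     IntWritable.class, Text.class);
+ long mark = writer.getLength();   // a synchronized position
+ writer.append(new IntWritable(42), new Text("forty two"));
+ writer.sync();                    // explicitly create a sync point
+ writer.close();
+
+ SequenceFile.Reader reader = new SequenceFile.Reader(fs, name, conf);
+ reader.seek(mark);                // legal: mark came from getLength()
+ -->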
+ <!-- start class org.apache.hadoop.io.SetFile -->
+ <class name="SetFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A file-based set of keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile -->
+ <!-- start class org.apache.hadoop.io.SetFile.Reader -->
+ <class name="SetFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set using the named comparator.]]>
+ </doc>
+ </constructor>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in a set into <code>key</code>. Returns
+ true if such a key exists and false when at the end of the set.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the matching key from a set into <code>key</code>.
+ Returns <code>key</code>, or null if no match exists.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Reader -->
+ <!-- start class org.apache.hadoop.io.SetFile.Writer -->
+ <class name="SetFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="pass a Configuration too">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named set for keys of the named class.
+ @deprecated pass a Configuration too]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element class and compression type.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element comparator and compression type.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key to a set. The key must be strictly greater than the
+ previous key added to the set.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Writer -->
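+ <!-- Hand-written illustrative sketch (not generated JDiff output): building
+      and probing a SetFile. The directory name is hypothetical; as documented
+      above, keys must be appended in strictly increasing order.
+
+      Configuration conf = new Configuration();
+      FileSystem fs = FileSystem.getLocal(conf);
+      String dir = "/tmp/example.set";
+      SetFile.Writer writer = new SetFile.Writer(conf, fs, dir,
+          Text.class, SequenceFile.CompressionType.NONE);
+      writer.append(new Text("apple"));     // strictly ascending keys
+      writer.append(new Text("banana"));
+      writer.close();                       // inherited from MapFile.Writer
+
+      SetFile.Reader reader = new SetFile.Reader(fs, dir, conf);
+      boolean present = reader.seek(new Text("banana"));   // membership test
+      reader.close();
+ -->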
+ <!-- start class org.apache.hadoop.io.SortedMapWritable -->
+ <class name="SortedMapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="SortedMapWritable" type="org.apache.hadoop.io.SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="comparator" return="java.util.Comparator&lt;? super org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="firstKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="headMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="lastKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="subMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="tailMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.WritableComparable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable SortedMap.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SortedMapWritable -->
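+ <!-- Hand-written illustrative sketch (not generated JDiff output): a
+      SortedMapWritable keeps its WritableComparable keys in sorted order and
+      is itself serializable through write()/readFields().
+
+      SortedMapWritable map = new SortedMapWritable();
+      map.put(new IntWritable(2), new Text("two"));
+      map.put(new IntWritable(1), new Text("one"));
+      WritableComparable first = map.firstKey();            // IntWritable(1)
+      java.util.SortedMap head = map.headMap(new IntWritable(2));  // keys below 2
+ -->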
+ <!-- start interface org.apache.hadoop.io.Stringifier -->
+ <interface name="Stringifier" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Converts the object to a string representation
+ @param obj the object to convert
+ @return the string representation of the object
+ @throws IOException if the object cannot be converted]]>
+ </doc>
+ </method>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from its string representation.
+ @param str the string representation of the object
+ @return restored object
+ @throws IOException if the object cannot be restored]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes this object.
+ @throws IOException if an I/O error occurs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stringifier interface offers two methods to convert an object
+ to a string representation and restore the object given its
+ string representation.
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Stringifier -->
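+ <!-- Hand-written illustrative sketch (not generated JDiff output): a minimal
+      Stringifier implementation for IntWritable. A production implementation
+      would more likely serialize through a Serializer and encode the raw
+      bytes; this sketch only shows the interface contract.
+
+      public class IntWritableStringifier implements Stringifier<IntWritable> {
+        public String toString(IntWritable obj) throws IOException {
+          return Integer.toString(obj.get());
+        }
+        public IntWritable fromString(String str) throws IOException {
+          return new IntWritable(Integer.parseInt(str));
+        }
+        public void close() throws IOException {
+          // nothing to release in this sketch
+        }
+      }
+ -->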
+ <!-- start class org.apache.hadoop.io.Text -->
+ <class name="Text" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Text" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a string.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="org.apache.hadoop.io.Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another text.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a byte array.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the raw bytes; however, only data up to {@link #getLength()} is
+ valid.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of bytes in the byte array]]>
+ </doc>
+ </method>
+ <method name="charAt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="int"/>
+ <doc>
+ <![CDATA[Returns the Unicode Scalar Value (32-bit integer value)
+ for the character at <code>position</code>. Note that this
+ method avoids using the converter or doing String instantiation.
+ @return the Unicode scalar value at position or -1
+ if the position is invalid or points to a
+ trailing byte]]>
+ </doc>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Finds any occurrence of <code>what</code> in the backing
+ buffer, starting at position <code>start</code>. The starting
+ position is measured in bytes and the return value is in
+ terms of byte position in the buffer. The backing buffer is
+ not converted to a string for this operation.
+ @return byte position of the first occurrence of the search
+ string in the UTF-8 buffer or -1 if not found]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <doc>
+ <![CDATA[Set to a UTF-8 byte array.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[Copy a text.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Set the Text to a range of bytes.
+ @param utf8 the data to copy from
+ @param start the first position of the new string
+ @param len the number of bytes of the new string]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Append a range of bytes to the end of the given text
+ @param utf8 the data to copy from
+ @param start the first position to append from utf8
+ @param len the number of bytes to append]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clear the string to empty.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert text back to string
+ @see java.lang.Object#toString()]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize this object from <code>in</code>.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one Text in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize this object to <code>out</code>; the length is
+ written using zero-compressed encoding.
+ @see Writable#write(DataOutput)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare two Texts bytewise using standard UTF8 ordering.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a Text with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Hash function.]]>
+ </doc>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If the input is malformed, it is
+ replaced with the substitution character.]]>
+ </doc>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If the input is malformed,
+ invalid characters are replaced with the substitution character.
+ @return ByteBuffer: bytes stored at ByteBuffer.array()
+ and length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.
+ @return ByteBuffer: bytes stored at ByteBuffer.array()
+ and length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF8 encoded string from in]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF8 encoded string to out]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check if a byte array contains valid utf-8
+ @param utf8 byte array
+ @throws MalformedInputException if the byte array contains invalid utf-8]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check to see if a byte array is valid utf-8
+ @param utf8 the array of bytes
+ @param start the offset of the first byte in the array
+ @param len the length of the byte sequence
+ @throws MalformedInputException if the byte array contains invalid bytes]]>
+ </doc>
+ </method>
+ <method name="bytesToCodePoint" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="java.nio.ByteBuffer"/>
+ <doc>
+ <![CDATA[Returns the next code point at the current position in
+ the buffer. The buffer's position will be incremented.
+ Any mark set on this buffer will be changed by this method!]]>
+ </doc>
+ </method>
+ <method name="utf8Length" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[For the given string, returns the number of UTF-8 bytes
+ required to encode the string.
+ @param string text to encode
+ @return number of UTF-8 bytes required to encode]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class stores text using standard UTF8 encoding. It provides methods
+ to serialize, deserialize, and compare texts at byte level. The type of
+ length is integer and is serialized using zero-compressed format. <p>In
+ addition, it provides methods for string traversal without converting the
+ byte array to a string. <p>Also includes utilities for
+ serializing/deserializing a string, coding/decoding a string, checking if a
+ byte array contains valid UTF8 code, and calculating the length of an encoded
+ string.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text -->
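+ <!-- Hand-written illustrative sketch (not generated JDiff output): byte-level
+      traversal and the static UTF-8 utilities, exercising the methods
+      described above.
+
+      Text t = new Text("hadoop");
+      int len = t.getLength();        // 6: number of valid bytes
+      int pos = t.find("doo");        // 2: byte offset of the match
+      int cp  = t.charAt(0);          // Unicode scalar value of 'h'
+      t.set("hello");                 // replaces the contents in place
+      byte[] raw = t.getBytes();      // only bytes up to getLength() are valid
+      int nbytes = Text.utf8Length("hello");   // 5 UTF-8 bytes needed
+ -->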
+ <!-- start class org.apache.hadoop.io.Text.Comparator -->
+ <class name="Text.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Text.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for Text keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text.Comparator -->
+ <!-- start class org.apache.hadoop.io.TwoDArrayWritable -->
+ <class name="TwoDArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[][]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[][]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for 2D arrays containing a matrix of instances of a class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.TwoDArrayWritable -->
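+ <!-- Hand-written illustrative sketch (not generated JDiff output): wrapping
+      a 2x2 matrix of IntWritables for serialization.
+
+      Writable[][] matrix = {
+        { new IntWritable(1), new IntWritable(2) },
+        { new IntWritable(3), new IntWritable(4) }
+      };
+      TwoDArrayWritable w = new TwoDArrayWritable(IntWritable.class, matrix);
+      Writable[][] back = w.get();    // same values after a write/readFields trip
+ -->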
+ <!-- start class org.apache.hadoop.io.UTF8 -->
+ <class name="UTF8" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="replaced by Text">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UTF8" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <constructor name="UTF8" type="org.apache.hadoop.io.UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another UTF8.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw bytes.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the encoded string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Set to contain the contents of another UTF8.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one UTF8 in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare two UTF8s.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert to a String.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a UTF8 with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convert a string to a UTF-8 encoded byte array.
+ @see String#getBytes(String)]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string.
+
+ @see DataInput#readUTF()]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF-8 encoded string.
+
+ @see DataOutput#writeUTF(String)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for strings that uses the UTF8 encoding.
+
+ <p>Also includes utilities for efficiently reading and writing UTF-8.
+
+ @deprecated replaced by Text]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8 -->
+ <!-- start class org.apache.hadoop.io.UTF8.Comparator -->
+ <class name="UTF8.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UTF8.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8.Comparator -->
+ <!-- start class org.apache.hadoop.io.VersionedWritable -->
+ <class name="VersionedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="VersionedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="byte"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the version number of the current implementation.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for Writables that provides version checking.
+
+ <p>This is useful when a class may evolve, so that instances written by the
+ old version of the class may still be processed by the new version. To
+ handle this situation, {@link #readFields(DataInput)}
+ implementations should catch {@link VersionMismatchException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionedWritable -->
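+ <!-- Hand-written illustrative sketch (not generated JDiff output): a
+      versioned record. VersionedWritable writes the version byte in write()
+      and verifies it in readFields(), throwing VersionMismatchException when
+      the stored version differs from getVersion().
+
+      public class Record extends VersionedWritable {
+        private static final byte VERSION = 1;
+        private int counter;
+
+        public byte getVersion() { return VERSION; }
+
+        public void write(DataOutput out) throws IOException {
+          super.write(out);        // emits the version byte first
+          out.writeInt(counter);
+        }
+
+        public void readFields(DataInput in) throws IOException {
+          super.readFields(in);    // checks the version byte
+          counter = in.readInt();
+        }
+      }
+ -->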
+ <!-- start class org.apache.hadoop.io.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="byte, byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Thrown by {@link VersionedWritable#readFields(DataInput)} when the
+ version of an object being read does not match the current implementation
+ version as returned by {@link VersionedWritable#getVersion()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionMismatchException -->
+ <!-- start class org.apache.hadoop.io.VIntWritable -->
+ <class name="VIntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VIntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VIntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VIntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VIntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for integer values stored in variable-length format.
+ Such values take between one and five bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVInt(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VIntWritable -->
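+ <!-- Hand-written illustrative sketch (not generated JDiff output): the
+      variable-length wire format in action. Small magnitudes cost a single
+      byte; larger ones cost up to five.
+
+      ByteArrayOutputStream buf = new ByteArrayOutputStream();
+      DataOutputStream out = new DataOutputStream(buf);
+      new VIntWritable(42).write(out);         // 1 byte on the wire
+      new VIntWritable(1000000).write(out);    // 4 bytes on the wire
+      out.flush();
+      // buf.size() == 5 here
+ -->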
+ <!-- start class org.apache.hadoop.io.VLongWritable -->
+ <class name="VLongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VLongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VLongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VLongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VLongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs in a variable-length format. Such values take
+ between one and nine bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVLong(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VLongWritable -->
+ <!-- start interface org.apache.hadoop.io.Writable -->
+ <interface name="Writable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the fields of this object to <code>out</code>.
+
+ @param out <code>DataOutput</code> to serialize this object into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the fields of this object from <code>in</code>.
+
+ <p>For efficiency, implementations should attempt to re-use storage in the
+ existing object where possible.</p>
+
+ @param in <code>DataInput</code> to deserialize this object from.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A serializable object which implements a simple, efficient, serialization
+ protocol, based on {@link DataInput} and {@link DataOutput}.
+
+ <p>Any <code>key</code> or <code>value</code> type in the Hadoop Map-Reduce
+ framework implements this interface.</p>
+
+ <p>Implementations typically implement a static <code>read(DataInput)</code>
+ method which constructs a new instance, calls {@link #readFields(DataInput)}
+ and returns the instance.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritable implements Writable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public static MyWritable read(DataInput in) throws IOException {
+ MyWritable w = new MyWritable();
+ w.readFields(in);
+ return w;
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Writable -->
+ <!-- start interface org.apache.hadoop.io.WritableComparable -->
+ <interface name="WritableComparable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable&lt;T&gt;"/>
+ <doc>
+ <![CDATA[A {@link Writable} which is also {@link Comparable}.
+
+ <p><code>WritableComparable</code>s can be compared to each other, typically
+ via <code>Comparator</code>s. Any type which is to be used as a
+ <code>key</code> in the Hadoop Map-Reduce framework should implement this
+ interface.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritableComparable implements WritableComparable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public int compareTo(MyWritableComparable w) {
+ int thisValue = this.counter;
+ int thatValue = w.counter;
+ return (thisValue &lt; thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableComparable -->
+ <!-- start class org.apache.hadoop.io.WritableComparator -->
+ <class name="WritableComparator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator"/>
+ <constructor name="WritableComparator" type="java.lang.Class"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </constructor>
+ <constructor name="WritableComparator" type="java.lang.Class, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get a comparator for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link WritableComparable}
+ implementation.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the WritableComparable implementation class.]]>
+ </doc>
+ </method>
+ <method name="newKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a new {@link WritableComparable} instance.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Optimization hook. Override this to make SequenceFile.Sorters scream.
+
+ <p>The default implementation reads the data into two {@link
+ WritableComparable}s (using {@link
+ Writable#readFields(DataInput)}), then calls {@link
+ #compare(WritableComparable,WritableComparable)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[Compare two WritableComparables.
+
+ <p> The default implementation uses the natural ordering, calling {@link
+ Comparable#compareTo(Object)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <method name="hashBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Compute hash for binary data.]]>
+ </doc>
+ </method>
+ <method name="readUnsignedShort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an unsigned short from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an integer from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a long from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array with the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator for {@link WritableComparable}s.
+
+ <p>This base implementation uses the natural ordering. To define alternate
+ orderings, override {@link #compare(WritableComparable,WritableComparable)}.
+
+ <p>One may optimize compare-intensive operations by overriding
+ {@link #compare(byte[],int,int,byte[],int,int)}. Static utility methods are
+ provided to assist in optimized implementations of this method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableComparator -->
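+ <!-- Hand-written illustrative sketch (not generated JDiff output): an
+      optimized raw comparator for a hypothetical MyKey type whose serialized
+      form begins with a 4-byte int, registered through define() so that
+      get(MyKey.class) returns it when sorting.
+
+      public class MyKeyComparator extends WritableComparator {
+        public MyKeyComparator() { super(MyKey.class); }
+
+        public int compare(byte[] b1, int s1, int l1,
+                           byte[] b2, int s2, int l2) {
+          int v1 = readInt(b1, s1);   // compare without deserializing
+          int v2 = readInt(b2, s2);
+          return v1 < v2 ? -1 : (v1 == v2 ? 0 : 1);
+        }
+      }
+
+      // register once, e.g. in a static initializer of MyKey:
+      WritableComparator.define(MyKey.class, new MyKeyComparator());
+ -->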
+ <!-- start class org.apache.hadoop.io.WritableFactories -->
+ <class name="WritableFactories" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="factory" type="org.apache.hadoop.io.WritableFactory"/>
+ <doc>
+ <![CDATA[Define a factory for a class.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.io.WritableFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the factory defined for a class.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factories for non-public writables. Defining a factory permits {@link
+ ObjectWritable} to construct instances of non-public classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableFactories -->
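+ <!-- Hand-written illustrative sketch (not generated JDiff output):
+      registering a factory for a hypothetical package-private MyRecord class
+      so that ObjectWritable can instantiate it.
+
+      WritableFactories.setFactory(MyRecord.class, new WritableFactory() {
+        public Writable newInstance() { return new MyRecord(); }
+      });
+      Writable fresh = WritableFactories.newInstance(MyRecord.class);
+ -->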
+ <!-- start interface org.apache.hadoop.io.WritableFactory -->
+ <interface name="WritableFactory" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a new instance.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A factory for a class of Writable.
+ @see WritableFactories]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableFactory -->
+ <!-- start class org.apache.hadoop.io.WritableName -->
+ <class name="WritableName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the name that a class should be known as to something other than the
+ class name.]]>
+ </doc>
+ </method>
+ <method name="addName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add an alternate name for a class.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Return the name for a class. Default is {@link Class#getName()}.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the class for a name. Default is {@link Class#forName(String)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility to permit renaming of Writable implementation classes without
+ invalidating files that contain their class name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableName -->
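+ <!-- A minimal usage sketch, assuming a Writable renamed from the hypothetical
+      com.example.OldRecord to com.example.NewRecord. Registering the old name
+      as an alias keeps previously written files readable:
+
+        import org.apache.hadoop.io.WritableName;
+
+        // Files written before the rename still record "com.example.OldRecord".
+        WritableName.addName(NewRecord.class, "com.example.OldRecord");
+ -->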
+ <!-- start class org.apache.hadoop.io.WritableUtils -->
+ <class name="WritableUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="WritableUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readCompressedByteArray" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skipCompressedByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedByteArray" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="bytes" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="displayByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="record" type="byte[]"/>
+ </method>
+ <method name="clone" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="orig" type="org.apache.hadoop.io.Writable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Make a copy of a writable object using serialization to a buffer.
+ @param orig The object to copy
+ @return The copied object]]>
+ </doc>
+ </method>
+ <method name="cloneInto"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.io.Writable"/>
+ <param name="src" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a copy of the writable object using serialization to a buffer.
+ @param dst the object to copy into, whose current contents are destroyed
+ @param src the object to copy from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an integer to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ integer is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following integer
+ is positive and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following integer
+ is negative and the number of bytes that follow is -(v+120). Bytes are
+ stored with the highest non-zero byte first.
+
+ @param stream Binary output stream
+ @param i Integer to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
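+ <!-- A worked round trip of the zero-compressed encoding described above,
+      using in-memory streams. The value 300 needs two payload bytes
+      (0x01 0x2C), so the marker byte is -(2 + 112) = -114 and three bytes
+      are written in total:
+
+        import java.io.*;
+        import org.apache.hadoop.io.WritableUtils;
+
+        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+        DataOutputStream out = new DataOutputStream(bytes);
+        WritableUtils.writeVInt(out, 300);           // writes [-114, 0x01, 0x2C]
+        DataInputStream in = new DataInputStream(
+            new ByteArrayInputStream(bytes.toByteArray()));
+        int v = WritableUtils.readVInt(in);          // 300
+        int size = WritableUtils.getVIntSize(300);   // 3
+ -->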
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ long is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following long
+ is positive and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following long
+ is negative and the number of bytes that follow is -(v+120). Bytes are
+ stored with the highest non-zero byte first.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized long from stream.]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized integer from stream.]]>
+ </doc>
+ </method>
+ <method name="isNegativeVInt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Given the first byte of a vint/vlong, determine the sign.
+ @param value the first byte
+ @return true if the encoded value is negative]]>
+ </doc>
+ </method>
+ <method name="decodeVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Parse the first byte of a vint/vlong to determine the number of bytes
+ @param value the first byte of the vint/vlong
+ @return the total number of bytes (1 to 9)]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+ <![CDATA[Get the encoded length of an integer stored in variable-length format.
+ @return the encoded length]]>
+ </doc>
+ </method>
+ <method name="readEnum" return="T extends java.lang.Enum&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="enumType" type="java.lang.Class&lt;T&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an Enum value from DataInput. Enums are read and written
+ using String values.
+ @param <T> Enum type
+ @param in DataInput to read from
+ @param enumType Class type of Enum
+ @return Enum represented by String read from DataInput
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeEnum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="enumVal" type="java.lang.Enum"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the String value of an enum to DataOutput.
+ @param out DataOutput stream
+ @param enumVal enum value
+ @throws IOException]]>
+ </doc>
+ </method>
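+ <!-- A minimal round-trip sketch of the String-based enum serialization
+      described above; Mode is a hypothetical enum.
+
+        import java.io.*;
+        import org.apache.hadoop.io.WritableUtils;
+
+        enum Mode { FAST, SAFE }
+
+        ByteArrayOutputStream buf = new ByteArrayOutputStream();
+        DataOutputStream out = new DataOutputStream(buf);
+        WritableUtils.writeEnum(out, Mode.SAFE);      // stored as the String "SAFE"
+        DataInputStream in = new DataInputStream(
+            new ByteArrayInputStream(buf.toByteArray()));
+        Mode m = WritableUtils.readEnum(in, Mode.class);  // Mode.SAFE
+ -->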
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip <i>len</i> bytes in input stream <i>in</i>.
+ @param in input stream
+ @param len number of bytes to skip
+ @throws IOException when fewer than <i>len</i> bytes could be skipped]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableUtils -->
+</package>
+<package name="org.apache.hadoop.io.compress">
+ <!-- start class org.apache.hadoop.io.compress.CodecPool -->
+ <class name="CodecPool" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CodecPool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Compressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Compressor</code>
+ @return <code>Compressor</code> for the given
+ <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="getDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Decompressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Decompressor</code>
+ @return <code>Decompressor</code> for the given
+ <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="returnCompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <doc>
+ <![CDATA[Return the {@link Compressor} to the pool.
+
+ @param compressor the <code>Compressor</code> to be returned to the pool]]>
+ </doc>
+ </method>
+ <method name="returnDecompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <doc>
+ <![CDATA[Return the {@link Decompressor} to the pool.
+
+ @param decompressor the <code>Decompressor</code> to be returned to the
+ pool]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A global compressor/decompressor pool used to save and reuse
+ (possibly native) compression/decompression codecs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CodecPool -->
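+ <!-- A minimal sketch of the borrow/return pattern the pool expects; codec
+      (a CompressionCodec), rawOut (an OutputStream) and data are assumed to
+      exist. Returning the compressor in a finally block keeps (possibly
+      native) codec instances reusable:
+
+        import org.apache.hadoop.io.compress.*;
+
+        Compressor compressor = CodecPool.getCompressor(codec);
+        try {
+          CompressionOutputStream cout = codec.createOutputStream(rawOut, compressor);
+          cout.write(data, 0, data.length);
+          cout.finish();
+        } finally {
+          CodecPool.returnCompressor(compressor);
+        }
+ -->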
+ <!-- start interface org.apache.hadoop.io.compress.CompressionCodec -->
+ <interface name="CompressionCodec" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream}.
+
+ @param out the location for the final output stream
+ @return a stream the user can write uncompressed data to, to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream} with the given {@link Compressor}.
+
+ @param out the location for the final output stream
+ @param compressor compressor to use
+ @return a stream the user can write uncompressed data to, to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
+
+ @return the type of compressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Compressor} for use by this {@link CompressionCodec}.
+
+ @return a new compressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a stream decompressor that will read from the given input stream.
+
+ @param in the stream to read compressed bytes from
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionInputStream} that will read from the given
+ {@link InputStream} with the given {@link Decompressor}.
+
+ @param in the stream to read compressed bytes from
+ @param decompressor decompressor to use
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
+
+ @return the type of decompressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
+
+ @return a new decompressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a streaming compression/decompression pair.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.CompressionCodec -->
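+ <!-- A minimal sketch of a compress-then-decompress round trip through the
+      interface above; codec, data, rawOut, rawIn and buffer are assumed to
+      exist.
+
+        import org.apache.hadoop.io.compress.*;
+
+        CompressionOutputStream out = codec.createOutputStream(rawOut);
+        out.write(data, 0, data.length);  // uncompressed in, compressed to rawOut
+        out.finish();                     // complete the stream without closing rawOut
+
+        CompressionInputStream in = codec.createInputStream(rawIn);
+        int n = in.read(buffer, 0, buffer.length);  // yields uncompressed bytes
+ -->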
+ <!-- start class org.apache.hadoop.io.compress.CompressionCodecFactory -->
+ <class name="CompressionCodecFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionCodecFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Find the codecs specified in the config value io.compression.codecs
+ and register them. Defaults to gzip and the default (deflate) codec.]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print the extension map out as a string.]]>
+ </doc>
+ </method>
+ <method name="getCodecClasses" return="java.util.List&lt;java.lang.Class&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the list of codecs listed in the configuration
+ @param conf the configuration to look in
+ @return a list of the codec classes or null if the attribute
+ was not set]]>
+ </doc>
+ </method>
+ <method name="setCodecClasses"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="classes" type="java.util.List&lt;java.lang.Class&gt;"/>
+ <doc>
+ <![CDATA[Sets a list of codec classes in the configuration.
+ @param conf the configuration to modify
+ @param classes the list of classes to set]]>
+ </doc>
+ </method>
+ <method name="getCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Find the relevant compression codec for the given file based on its
+ filename suffix.
+ @param file the filename to check
+ @return the codec object]]>
+ </doc>
+ </method>
+ <method name="removeSuffix" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes a suffix from a filename, if it has it.
+ @param filename the filename to strip
+ @param suffix the suffix to remove
+ @return the shortened filename]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[A little test program.
+ @param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A factory that will find the correct codec for a given filename.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionCodecFactory -->
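+ <!-- A minimal sketch of suffix-based codec lookup; the path is illustrative.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.Path;
+        import org.apache.hadoop.io.compress.CompressionCodec;
+        import org.apache.hadoop.io.compress.CompressionCodecFactory;
+
+        CompressionCodecFactory factory =
+            new CompressionCodecFactory(new Configuration());
+        CompressionCodec codec = factory.getCodec(new Path("part-00000.gz"));
+        // the gzip codec here; null if no registered codec matches the suffix
+ -->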
+ <!-- start class org.apache.hadoop.io.compress.CompressionInputStream -->
+ <class name="CompressionInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a compression input stream that reads
+ compressed bytes from the given stream and decompresses them.
+
+ @param in The input stream to read compressed bytes from.]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read bytes from the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the decompressor to its initial state and discard any buffered data,
+ as the underlying stream may have been repositioned.]]>
+ </doc>
+ </method>
+ <field name="in" type="java.io.InputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The underlying input stream containing the compressed data.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression input stream.
+
+ <p>Implementations are assumed to be buffered. This permits clients to
+ reposition the underlying input stream then call {@link #resetState()},
+ without having to also synchronize client buffers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionInputStream -->
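+ <!-- A short sketch of the repositioning contract described above; seekable is
+      an assumed handle (e.g. an org.apache.hadoop.fs.FSDataInputStream) on the
+      same underlying stream that the CompressionInputStream cin wraps.
+
+        seekable.seek(blockStart);  // move the underlying stream
+        cin.resetState();           // drop buffered data before reading again
+ -->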
+ <!-- start class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <class name="CompressionOutputStream" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a compression output stream that writes
+ the compressed bytes to the given stream.
+ @param out The output stream to write the compressed bytes to.]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finishes writing compressed data to the output stream
+ without closing the underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the compression to the initial state.
+ Does not reset the underlying stream.]]>
+ </doc>
+ </method>
+ <field name="out" type="java.io.OutputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The underlying output stream that receives the compressed data.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression output stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <!-- start interface org.apache.hadoop.io.compress.Compressor -->
+ <interface name="Compressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for compression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets preset dictionary for compression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of uncompressed bytes input so far.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of compressed bytes output so far.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[When called, indicates that compression should end
+ with the current contents of the input buffer.]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the compressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with compressed data. Returns actual number
+ of bytes of compressed data. A return value of 0 indicates that
+ needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the compressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of compressed data.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets compressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the compressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'compressor' which can be
+ plugged into a {@link CompressionOutputStream} to compress data.
+ This is modelled after {@link java.util.zip.Deflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Compressor -->
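+ <!-- A minimal Deflater-style driving loop for the interface above;
+      compressor, input and sink are assumed to exist.
+
+        compressor.setInput(input, 0, input.length);
+        compressor.finish();                    // no more input will follow
+        byte[] buf = new byte[64 * 1024];
+        while (!compressor.finished()) {
+          int n = compressor.compress(buf, 0, buf.length);
+          sink.write(buf, 0, n);                // emit compressed bytes
+        }
+        compressor.end();
+ -->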
+ <!-- start interface org.apache.hadoop.io.compress.Decompressor -->
+ <interface name="Decompressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for decompression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets preset dictionary for decompression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns <code>true</code> if a preset dictionary is needed for decompression.
+ @return <code>true</code> if a preset dictionary is needed for decompression]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the compressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with uncompressed data. Returns actual number
+ of bytes of uncompressed data. A return value of 0 indicates that
+ #needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the uncompressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of uncompressed data.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets decompressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the decompressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'de-compressor' which can be
+ plugged into a {@link CompressionInputStream} to decompress data.
+ This is modelled after {@link java.util.zip.Inflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Decompressor -->
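+ <!-- The mirror-image driving loop for the interface above; decompressor,
+      compressed and sink are assumed to exist.
+
+        decompressor.setInput(compressed, 0, compressed.length);
+        byte[] buf = new byte[64 * 1024];
+        while (!decompressor.finished()) {
+          int n = decompressor.decompress(buf, 0, buf.length);
+          if (n == 0 && decompressor.needsInput()) break;  // feed more input here
+          sink.write(buf, 0, n);
+        }
+        decompressor.end();
+ -->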
+ <!-- start class org.apache.hadoop.io.compress.DefaultCodec -->
+ <class name="DefaultCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="DefaultCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.DefaultCodec -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec -->
+ <class name="GzipCodec" extends="org.apache.hadoop.io.compress.DefaultCodec"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class creates gzip compressors/decompressors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <class name="GzipCodec.GzipInputStream" extends="org.apache.hadoop.io.compress.DecompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipInputStream" type="org.apache.hadoop.io.compress.DecompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow subclasses to directly set the inflater stream.]]>
+ </doc>
+ </constructor>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <class name="GzipCodec.GzipOutputStream" extends="org.apache.hadoop.io.compress.CompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipOutputStream" type="org.apache.hadoop.io.compress.CompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow subclasses to provide a different underlying stream.
+ @param out the Deflater stream to use]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A bridge that wraps around a DeflaterOutputStream to make it
+ a CompressionOutputStream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <!-- start class org.apache.hadoop.io.compress.LzoCodec -->
+ <class name="LzoCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="LzoCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if native-lzo library is loaded & initialized.
+
+ @param conf configuration
+ @return <code>true</code> if native-lzo library is loaded & initialized;
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link org.apache.hadoop.io.compress.CompressionCodec} for a streaming
+ <b>lzo</b> compression/decompression pair.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzoCodec -->
+</package>
+<package name="org.apache.hadoop.io.compress.lzo">
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
+ <class name="LzoCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="LzoCompressor" type="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified {@link CompressionStrategy}.
+
+ @param strategy lzo compression algorithm to use
+ @param directBufferSize size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default lzo1x_1 compression.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo compressors are loaded and initialized.
+
+ @return <code>true</code> if lzo compressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of bytes given to this compressor since last reset.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of bytes consumed by callers of compress since last reset.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Noop.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
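+ <!-- A sketch of the Compressor call sequence documented above (illustrative;
+      "input" and "output" are placeholder byte arrays, and native lzo is assumed
+      to be loaded, see isNativeLzoLoaded above):
+
+      LzoCompressor comp = new LzoCompressor();   // default lzo1x_1 strategy
+      comp.setInput(input, 0, input.length);
+      comp.finish();
+      while (!comp.finished()) {
+        int n = comp.compress(output, 0, output.length);
+        sink(output, n);                          // sink(...) is a hypothetical writer
+      }
+      long consumed = comp.getBytesRead();        // bytes handed to the compressor
+ -->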
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <class name="LzoCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression algorithm for lzo library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
+ <class name="LzoDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="LzoDecompressor" type="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.
+
+ @param strategy lzo decompression algorithm
+ @param directBufferSize size of the direct-buffer]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo decompressors are loaded and initialized.
+
+ @return <code>true</code> if lzo decompressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
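+ <!-- The decompressor mirrors the call sequence above; a sketch (illustrative;
+      "compressed" and "plain" are placeholder byte arrays):
+
+      LzoDecompressor decomp = new LzoDecompressor();
+      decomp.setInput(compressed, 0, compressed.length);
+      while (!decomp.finished() && !decomp.needsInput()) {
+        int n = decomp.decompress(plain, 0, plain.length);
+        handle(plain, n);   // handle(...) is a hypothetical consumer
+      }
+ -->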
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+ <class name="LzoDecompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+</package>
+<package name="org.apache.hadoop.io.compress.zlib">
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <class name="BuiltInZlibDeflater" extends="java.util.zip.Deflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="BuiltInZlibDeflater" type="int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Deflater to make it conform
+ to org.apache.hadoop.io.compress.Compressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <class name="BuiltInZlibInflater" extends="java.util.zip.Inflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="BuiltInZlibInflater" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibInflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Inflater to make it conform
+ to org.apache.hadoop.io.compress.Decompressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
+ <class name="ZlibCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="ZlibCompressor" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified compression level.
+ Compressed data will be generated in ZLIB format.
+
+ @param level Compression level #CompressionLevel
+ @param strategy Compression strategy #CompressionStrategy
+ @param header Compression header #CompressionHeader
+ @param directBufferSize Size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default compression level.
+ Compressed data will be generated in ZLIB format.]]>
+ </doc>
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
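+ <!-- A sketch of the four-argument constructor above (illustrative; the enum
+      constants are assumed names from the nested enums listed below, and 64K is
+      an arbitrary direct-buffer size):
+
+      ZlibCompressor comp = new ZlibCompressor(
+          ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
+          ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY,
+          ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
+          64 * 1024);
+ -->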
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <class name="ZlibCompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The type of header for compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <class name="ZlibCompressor.CompressionLevel" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression level for the zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <class name="ZlibCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression strategy for the zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <class name="ZlibDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="ZlibDecompressor" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new decompressor.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes output so far.
+
+ @return the total (non-negative) number of uncompressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes input so far.
+
+ @return the total (non-negative) number of compressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <class name="ZlibDecompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The headers to detect from compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
+ <class name="ZlibFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ZlibFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeZlibLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if native-zlib code is loaded & initialized correctly and
+ can be used for this job.
+
+ @param conf configuration
+ @return <code>true</code> if native-zlib is loaded & initialized
+ and can be used for this job, else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of factories to create the right
+ zlib/gzip compressor/decompressor instances.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
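+ <!-- A sketch of the factory selection described above (illustrative; "conf" is a
+      caller-supplied Configuration):
+
+      if (ZlibFactory.isNativeZlibLoaded(conf)) {
+        // native zlib is available and enabled for this job
+      }
+      Compressor comp = ZlibFactory.getZlibCompressor(conf);
+      Decompressor decomp = ZlibFactory.getZlibDecompressor(conf);
+ -->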
+</package>
+<package name="org.apache.hadoop.io.retry">
+ <!-- start class org.apache.hadoop.io.retry.RetryPolicies -->
+ <class name="RetryPolicies" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryPolicies"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="retryUpToMaximumCountWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumTimeWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxTime" type="long"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying for a maximum time, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumCountWithProportionalSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by the number of tries so far.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="exponentialBackoffRetry" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by a random
+ number in the range [0, 2^(number of retries so far)).
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByRemoteException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ A retry policy for RemoteException.
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <field name="TRY_ONCE_THEN_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail by re-throwing the exception.
+ This corresponds to having no retry mechanism in place.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="TRY_ONCE_DONT_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail silently for <code>void</code> methods, or by
+ re-throwing the exception for non-<code>void</code> methods.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="RETRY_FOREVER" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Keep trying forever.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A collection of useful implementations of {@link RetryPolicy}.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryPolicies -->
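+ <!-- A sketch combining the factory methods above (illustrative; assumes
+      java.util.HashMap and java.util.concurrent.TimeUnit; the choice of
+      IOException as the mapped exception is a placeholder):
+
+      RetryPolicy backoff =
+          RetryPolicies.exponentialBackoffRetry(5, 200, TimeUnit.MILLISECONDS);
+      Map<Class<? extends Exception>, RetryPolicy> policies =
+          new HashMap<Class<? extends Exception>, RetryPolicy>();
+      policies.put(IOException.class, backoff);
+      RetryPolicy policy =
+          RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL, policies);
+ -->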
+ <!-- start interface org.apache.hadoop.io.retry.RetryPolicy -->
+ <interface name="RetryPolicy" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="shouldRetry" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Exception"/>
+ <param name="retries" type="int"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[<p>
+ Determines whether the framework should retry a
+ method for the given exception, and the number
+ of retries that have been made for that operation
+ so far.
+ </p>
+ @param e The exception that caused the method to fail.
+ @param retries The number of times the method has been retried.
+ @return <code>true</code> if the method should be retried,
+ <code>false</code> if the method should not be retried
+ but shouldn't fail with an exception (only for void methods).
+ @throws Exception The re-thrown exception <code>e</code> indicating
+ that the method failed and should not be retried further.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Specifies a policy for retrying method failures.
+ Implementations of this interface should be immutable.
+ </p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.retry.RetryPolicy -->
+ <!-- start class org.apache.hadoop.io.retry.RetryProxy -->
+ <class name="RetryProxy" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryProxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using the same retry policy for each method in the interface.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param retryPolicy the policy for retrying method call failures
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="methodNameToPolicyMap" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using a set of retry policies specified by method name.
+ If no retry policy is defined for a method then a default of
+ {@link RetryPolicies#TRY_ONCE_THEN_FAIL} is used.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param methodNameToPolicyMap a map of method names to retry policies
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for creating retry proxies.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryProxy -->
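+ <!-- A sketch of wrapping an implementation behind a retry proxy (illustrative;
+      MyProtocol, impl and policy are placeholders, with policy built as in the
+      RetryPolicies sketch above):
+
+      MyProtocol retrying =
+          (MyProtocol) RetryProxy.create(MyProtocol.class, impl, policy);
+      retrying.doWork();   // failing calls are retried per the policy
+ -->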
+</package>
+<package name="org.apache.hadoop.io.serializer">
+ <!-- start interface org.apache.hadoop.io.serializer.Deserializer -->
+ <interface name="Deserializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the deserializer for reading.</p>]]>
+ </doc>
+ </method>
+ <method name="deserialize" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ Deserialize the next object from the underlying input stream.
+ If the object <code>t</code> is non-null then this deserializer
+ <i>may</i> set its internal state to the next object read from the input
+ stream. Otherwise, if the object <code>t</code> is null a new
+ deserialized object will be created.
+ </p>
+ @return the deserialized object]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying input stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for deserializing objects of type <T> from an
+ {@link InputStream}.
+ </p>
+
+ <p>
+ Deserializers are stateful, but must not buffer the input since
+ other producers may read from the input between calls to
+ {@link #deserialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Deserializer -->
+ <!-- start class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <class name="DeserializerComparator" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator&lt;T&gt;"/>
+ <constructor name="DeserializerComparator" type="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link Deserializer} to deserialize
+ the objects to be compared so that the standard {@link Comparator} can
+ be used to compare them.
+ </p>
+ <p>
+ One may optimize compare-intensive operations by using a custom
+ implementation of {@link RawComparator} that operates directly
+ on byte representations.
+ </p>
+ @param <T>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <class name="JavaSerialization" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;java.io.Serializable&gt;"/>
+ <constructor name="JavaSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ An experimental {@link Serialization} for Java {@link Serializable} classes.
+ </p>
+ @see JavaSerializationComparator]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <class name="JavaSerializationComparator" extends="org.apache.hadoop.io.serializer.DeserializerComparator&lt;T&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JavaSerializationComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o1" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ <param name="o2" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link JavaSerialization}
+ {@link Deserializer} to deserialize objects that are then compared via
+ their {@link Comparable} interfaces.
+ </p>
+ @param <T>
+ @see JavaSerialization]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <!-- start interface org.apache.hadoop.io.serializer.Serialization -->
+ <interface name="Serialization" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Allows clients to test whether this {@link Serialization}
+ supports the given class.]]>
+ </doc>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Serializer} for the given class.]]>
+ </doc>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Deserializer} for the given class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Encapsulates a {@link Serializer}/{@link Deserializer} pair.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serialization -->
+ <!-- start class org.apache.hadoop.io.serializer.SerializationFactory -->
+ <class name="SerializationFactory" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SerializationFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Serializations are found by reading the <code>io.serializations</code>
+ property from <code>conf</code>, which is a comma-delimited list of
+ classnames.
+ </p>]]>
+ </doc>
+ </constructor>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getSerialization" return="org.apache.hadoop.io.serializer.Serialization&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for {@link Serialization}s.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.SerializationFactory -->
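+ <!-- A sketch of the serializer lookup described above (illustrative; "conf" and
+      "out" are caller-supplied; IntWritable is used as a representative Writable):
+
+      SerializationFactory factory = new SerializationFactory(conf);
+      Serializer<IntWritable> ser = factory.getSerializer(IntWritable.class);
+      ser.open(out);
+      ser.serialize(new IntWritable(42));
+      ser.close();
+ -->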
+ <!-- start interface org.apache.hadoop.io.serializer.Serializer -->
+ <interface name="Serializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the serializer for writing.</p>]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Serialize <code>t</code> to the underlying output stream.</p>]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying output stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for serializing objects of type <T> to an
+ {@link OutputStream}.
+ </p>
+
+ <p>
+ Serializers are stateful, but must not buffer the output since
+ other producers may write to the output between calls to
+ {@link #serialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serializer -->
+ <!-- start class org.apache.hadoop.io.serializer.WritableSerialization -->
+ <class name="WritableSerialization" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="WritableSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Serialization} for {@link Writable}s that delegates to
+ {@link Writable#write(java.io.DataOutput)} and
+ {@link Writable#readFields(java.io.DataInput)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.WritableSerialization -->
+</package>
+<package name="org.apache.hadoop.ipc">
+ <!-- start class org.apache.hadoop.ipc.Client -->
+ <class name="Client" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Client" type="java.lang.Class, org.apache.hadoop.conf.Configuration, javax.net.SocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client whose values are of the given {@link Writable}
+ class.]]>
+ </doc>
+ </constructor>
+ <constructor name="Client" type="java.lang.Class&lt;?&gt;, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client with the default SocketFactory
+ @param valueClass
+ @param conf]]>
+ </doc>
+ </constructor>
+ <method name="setPingInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="pingInterval" type="int"/>
+ <doc>
+ <![CDATA[Set the ping interval value in the configuration.
+
+ @param conf Configuration
+ @param pingInterval the ping interval]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all threads related to this client. No further calls may be made
+ using this client.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a call, passing <code>param</code>, to the IPC server running at
+ <code>address</code>, returning the value. Throws exceptions if there are
+ network problems or if the remote code threw an exception.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="params" type="org.apache.hadoop.io.Writable[]"/>
+ <param name="addresses" type="java.net.InetSocketAddress[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Makes a set of calls in parallel. Each parameter is sent to the
+ corresponding address. When all values are available, or have timed out
+ or errored, the collected results are returned in an array. The array
+ contains nulls for calls that timed out or errored.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A client for an IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Server]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Client -->
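+ <!-- A sketch of a single IPC call with the client above (illustrative; "param",
+      "serverAddr" and "conf" are placeholders; ObjectWritable stands in for the
+      value class):
+
+      Client client = new Client(ObjectWritable.class, conf);
+      try {
+        Writable result = client.call(param, serverAddr);
+      } finally {
+        client.stop();   // no further calls may be made after stop()
+      }
+ -->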
+ <!-- start class org.apache.hadoop.ipc.RemoteException -->
+ <class name="RemoteException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RemoteException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lookupTypes" type="java.lang.Class[]"/>
+ <doc>
+ <![CDATA[If this remote exception wraps an exception of one of the lookupTypes,
+ return that wrapped exception; otherwise return this.
+ <p>
+ Unwraps any IOException.
+
+ @param lookupTypes the desired exception classes.
+ @return IOException, which is either the unwrapped lookup-type exception or this.]]>
+ </doc>
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Instantiate and return the exception wrapped up by this remote exception.
+
+ <p> This unwraps any <code>Throwable</code> that has a constructor taking
+ a <code>String</code> as a parameter.
+ Otherwise it returns this.
+
+ @return the unwrapped <code>Throwable</code>, or this]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RemoteException -->
+ <!-- start class org.apache.hadoop.ipc.RPC -->
+ <class name="RPC" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="waitForProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object with the default SocketFactory
+
+ @param protocol
+ @param clientVersion
+ @param addr
+ @param conf
+ @return a proxy instance
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopProxy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="proxy" type="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <doc>
+ <![CDATA[Stop this proxy and release its invoker's resources
+ @param proxy the proxy to be stopped]]>
+ </doc>
+ </method>
+ <method name="call" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="method" type="java.lang.reflect.Method"/>
+ <param name="params" type="java.lang.Object[][]"/>
+ <param name="addrs" type="java.net.InetSocketAddress[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Expert: Make multiple, parallel calls to a set of servers.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="numHandlers" type="int"/>
+ <param name="verbose" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple RPC mechanism.
+
+ A <i>protocol</i> is a Java interface. All parameters and return types must
+ be one of:
+
+ <ul> <li>a primitive type, <code>boolean</code>, <code>byte</code>,
+ <code>char</code>, <code>short</code>, <code>int</code>, <code>long</code>,
+ <code>float</code>, <code>double</code>, or <code>void</code>; or</li>
+
+ <li>a {@link String}; or</li>
+
+ <li>a {@link Writable}; or</li>
+
+ <li>an array of the above types</li> </ul>
+
+ All methods in the protocol should throw only IOException. No field data of
+ the protocol instance is transmitted.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC -->
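+  <!-- Illustrative usage sketch (not part of the generated API record): wiring a
+       client proxy and a server with the static methods listed above. MyProtocol,
+       MyProtocolImpl, the host name, and the port are hypothetical placeholders.
+
+         // Server side: export an implementation instance on a bind address/port.
+         MyProtocol impl = new MyProtocolImpl();
+         RPC.Server server = RPC.getServer(impl, "0.0.0.0", 9000, conf);
+         server.start();
+
+         // Client side: wait for the server and build a version-checked proxy.
+         InetSocketAddress addr = new InetSocketAddress("server-host", 9000);
+         MyProtocol proxy = (MyProtocol)
+             RPC.waitForProxy(MyProtocol.class, MyProtocol.versionID, addr, conf);
+         proxy.echo("ping");          // ordinary method call, carried over RPC
+         RPC.stopProxy(proxy);        // release the invoker's resources when done
+  -->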
+ <!-- start class org.apache.hadoop.ipc.RPC.Server -->
+ <class name="RPC.Server" extends="org.apache.hadoop.ipc.Server"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on]]>
+ </doc>
+ </constructor>
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on
+ @param numHandlers the number of method handler threads to run
+ @param verbose whether each call should be logged]]>
+ </doc>
+ </constructor>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receivedTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An RPC Server.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.Server -->
+ <!-- start class org.apache.hadoop.ipc.RPC.VersionMismatch -->
+ <class name="RPC.VersionMismatch" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.VersionMismatch" type="java.lang.String, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a version mismatch exception
+      @param interfaceName the name of the protocol with the version mismatch
+ @param clientVersion the client's version of the protocol
+ @param serverVersion the server's version of the protocol]]>
+ </doc>
+ </constructor>
+ <method name="getInterfaceName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the interface name
+ @return the java class name
+ (eg. org.apache.hadoop.mapred.InterTrackerProtocol)]]>
+ </doc>
+ </method>
+ <method name="getClientVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the client's preferred version]]>
+ </doc>
+ </method>
+ <method name="getServerVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the server's agreed-upon version.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A version mismatch for the RPC protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.VersionMismatch -->
+ <!-- start class org.apache.hadoop.ipc.Server -->
+ <class name="Server" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class, int, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class&lt;?&gt;, int, org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a server listening on the named port and address. Parameters passed must
+      be of the named class. The <code>handlerCount</code> determines
+ the number of handler threads that will be used to process calls.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.ipc.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the server instance the current call is executing under, or null.
+      May be called under {@link #call(Writable, long)} implementations, and under
+      {@link Writable} methods of parameters and return values. Permits applications
+      to access the server context.]]>
+ </doc>
+ </method>
+ <method name="getRemoteIp" return="java.net.InetAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the remote side's IP address when invoked inside an RPC.
+      Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="getRemoteAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns remote address as a string when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="bind"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.ServerSocket"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <param name="backlog" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A convenience method to bind to a given address and report
+ better exceptions if the address is not a valid host.
+ @param socket the socket to bind
+ @param address the address to bind to
+ @param backlog the number of connections allowed in the queue
+ @throws BindException if the address can't be bound
+ @throws UnknownHostException if the address isn't a valid host name
+ @throws IOException other random errors from bind]]>
+ </doc>
+ </method>
+ <method name="setTimeout"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[No longer used.]]>
+ </doc>
+ </method>
+ <method name="setSocketSendBufSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Sets the socket buffer size used for responding to RPCs]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts the service. Must be called before any calls will be handled.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops the service. No new calls will be handled after this is called.]]>
+ </doc>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Wait for the server to be stopped.
+ Does not wait for all subthreads to finish.
+ See {@link #stop()}.]]>
+ </doc>
+ </method>
+ <method name="getListenerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return the socket (IP + port) on which the RPC server is listening.
+      @return the socket (IP + port) on which the RPC server is listening.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receiveTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called for each call.]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The number of open RPC connections.
+      @return the number of open RPC connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <field name="HEADER" type="java.nio.ByteBuffer"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The first four bytes of Hadoop RPC connections]]>
+ </doc>
+ </field>
+ <field name="CURRENT_VERSION" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rpcMetrics" type="org.apache.hadoop.ipc.metrics.RpcMetrics"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Client]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Server -->
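+  <!-- Illustrative sketch (hypothetical EchoServer): the abstract Server can be
+       subclassed directly; its one abstract method, call(Writable, long), handles
+       each request. The bind address, port, and handler count are placeholders.
+
+         public class EchoServer extends Server {
+           public EchoServer(Configuration conf) throws IOException {
+             // parameter class Text, 4 handler threads
+             super("0.0.0.0", 9000, Text.class, 4, conf);
+           }
+           @Override
+           public Writable call(Writable param, long receiveTime)
+               throws IOException {
+             return param;   // echo the parameter back as the value
+           }
+         }
+  -->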
+ <!-- start interface org.apache.hadoop.ipc.VersionedProtocol -->
+ <interface name="VersionedProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return protocol version corresponding to protocol interface.
+ @param protocol The classname of the protocol interface
+ @param clientVersion The version of the protocol that the client speaks
+ @return the version that the server will speak]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Superclass of all protocols that use Hadoop RPC.
+ Subclasses of this interface are also supposed to have
+ a static final long versionID field.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.VersionedProtocol -->
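+  <!-- Illustrative sketch of a protocol definition (MyProtocol is hypothetical):
+       a protocol is a plain Java interface that extends VersionedProtocol and,
+       per the note above, carries a static final long versionID field and throws
+       only IOException.
+
+         public interface MyProtocol extends VersionedProtocol {
+           long versionID = 1L;   // interface fields are implicitly static final
+           String echo(String message) throws IOException;
+         }
+  -->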
+</package>
+<package name="org.apache.hadoop.ipc.metrics">
+ <!-- start class org.apache.hadoop.ipc.metrics.RpcMetrics -->
+ <class name="RpcMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="RpcMetrics" type="java.lang.String, java.lang.String, org.apache.hadoop.ipc.Server"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Push the metrics to the monitoring subsystem on doUpdate() call.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="rpcQueueTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The metrics variables are public:
+ - they can be set directly by calling their set/inc methods
+      - they can also be read directly, e.g. JMX does this.]]>
+ </doc>
+ </field>
+ <field name="rpcProcessingTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="metricsList" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.metrics.util.MetricsTimeVaryingRate&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various RPC statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #rpcQueueTime}.inc(time)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.metrics.RpcMetrics -->
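+  <!-- Illustrative sketch: since the metrics fields are public, callers update
+       them directly, as the class comment above shows. serverMetrics and the
+       millisecond values are hypothetical.
+
+         serverMetrics.rpcQueueTime.inc(queuedMillis);        // time spent queued
+         serverMetrics.rpcProcessingTime.inc(handlerMillis);  // time in a handler
+  -->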
+ <!-- start interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+ <interface name="RpcMgtMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRpcOpsNumber" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of RPC Operations in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Average time for RPC Operations in the last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Average RPC Operation Queued Time in the last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all min max times]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The number of open RPC connections.
+      @return the number of open RPC connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for the RPC layer.
+ Many of the statistics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the statistics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most do.
+ The default Null metrics context however does NOT. So if you aren't
+ using any other metrics context then you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+      in the hadoop-metrics.properties file:
+ <pre>
+ rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ rpc.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+      The context with the update thread is used to average the data periodically.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+</package>
+<package name="org.apache.hadoop.log">
+ <!-- start class org.apache.hadoop.log.LogLevel -->
+ <class name="LogLevel" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[A command line implementation]]>
+ </doc>
+ </method>
+ <field name="USAGES" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[Change log levels at runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel -->
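+  <!-- Illustrative sketch of the command-line entry point; the option strings
+       and the host:port follow the usual daemonlog form but are assumptions here.
+
+         LogLevel.main(new String[] {
+             "-setlevel", "jobtracker-host:50030",
+             "org.apache.hadoop.mapred.JobTracker", "DEBUG" });
+  -->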
+ <!-- start class org.apache.hadoop.log.LogLevel.Servlet -->
+ <class name="LogLevel.Servlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel.Servlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A servlet implementation]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel.Servlet -->
+</package>
+<package name="org.apache.hadoop.mapred">
+ <!-- start class org.apache.hadoop.mapred.ClusterStatus -->
+ <class name="ClusterStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of task trackers in the cluster.
+
+ @return the number of task trackers in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running map tasks in the cluster.
+
+ @return the number of currently running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running reduce tasks in the cluster.
+
+ @return the number of currently running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running map tasks in the cluster.
+
+ @return the maximum capacity for running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running reduce tasks in the cluster.
+
+ @return the maximum capacity for running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getJobTrackerState" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current state of the <code>JobTracker</code>,
+ as {@link JobTracker.State}
+
+ @return the current state of the <code>JobTracker</code>.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Status information on the current state of the Map-Reduce cluster.
+
+ <p><code>ClusterStatus</code> provides clients with information such as:
+ <ol>
+ <li>
+ Size of the cluster.
+ </li>
+ <li>
+ Task capacity of the cluster.
+ </li>
+ <li>
+ The number of currently running map & reduce tasks.
+ </li>
+ <li>
+ State of the <code>JobTracker</code>.
+ </li>
+ </ol></p>
+
+ <p>Clients can query for the latest <code>ClusterStatus</code>, via
+ {@link JobClient#getClusterStatus()}.</p>
+
+ @see JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ClusterStatus -->
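+  <!-- Illustrative sketch: polling cluster state through JobClient, as the class
+       comment suggests; the conf object is assumed to exist.
+
+         JobClient client = new JobClient(new JobConf(conf));
+         ClusterStatus status = client.getClusterStatus();
+         System.out.printf("trackers=%d maps=%d/%d reduces=%d/%d%n",
+             status.getTaskTrackers(),
+             status.getMapTasks(), status.getMaxMapTasks(),
+             status.getReduceTasks(), status.getMaxReduceTasks());
+  -->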
+ <!-- start class org.apache.hadoop.mapred.Counters -->
+ <class name="Counters" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Group&gt;"/>
+ <constructor name="Counters"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getGroupNames" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all counter classes.
+ @return Set of counter names.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Group&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getGroup" return="org.apache.hadoop.mapred.Counters.Group"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="groupName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the named counter group, or an empty group if there is none
+ with the specified name.]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Find the counter for the given enum. The same enum will always return the
+ same counter.
+ @param key the counter key
+ @return the matching counter object]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="group" type="java.lang.String"/>
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Find a counter by using strings
+ @param group the name of the group
+ @param id the id of the counter within the group (0 to N-1)
+ @param name the internal name of the counter
+ @return the counter for that name
+ @deprecated]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param key identifies a counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param group the name of the group
+ @param counter the internal name of the counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Returns current value of the specified counter, or 0 if the counter
+ does not exist.]]>
+ </doc>
+ </method>
+ <method name="incrAllCounters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+ </doc>
+ </method>
+ <method name="sum" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.mapred.Counters"/>
+ <param name="b" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Convenience method for computing the sum of two sets of counters.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of counters, by summing the number of counters
+ in each group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the set of groups.
+ The external format is:
+ #groups (groupName group)*
+
+ i.e. the number of groups followed by 0 or more groups, where each
+ group is of the form:
+
+ groupDisplayName #counters (false | true counter)*
+
+ where each counter is of the form:
+
+ name (false | true displayName) value]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a set of groups.]]>
+ </doc>
+ </method>
+ <method name="log"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Logs the current counter values.
+ @param log The log to use.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return textual representation of the counter values.]]>
+ </doc>
+ </method>
+ <method name="makeCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert a counters object into a single line that is easy to parse.
+ @return the string with "name=value" for each counter and separated by ","]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A set of named counters.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> can be of
+ any {@link Enum} type.</p>
+
+      <p><code>Counters</code> are bunched into {@link Group}s, each comprising
+ counters from a particular <code>Enum</code> class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters -->
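+  <!-- Illustrative sketch using only methods listed above; the enum and the
+       two input Counters instances are hypothetical.
+
+         enum MyCounter { BAD_RECORDS }
+
+         Counters counters = new Counters();
+         counters.incrCounter(MyCounter.BAD_RECORDS, 1);      // enum-keyed
+         counters.incrCounter("custom-group", "skipped", 5);  // string-keyed
+         long bad = counters.getCounter(MyCounter.BAD_RECORDS);
+         Counters total = Counters.sum(mapCounters, reduceCounters);
+  -->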
+ <!-- start class org.apache.hadoop.mapred.Counters.Counter -->
+ <class name="Counters.Counter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the internal name of the counter.
+ @return the internal name of the counter]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the counter.
+ @return the user facing name of the counter]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[What is the current value of this counter?
+ @return the current value]]>
+ </doc>
+ </method>
+ <method name="increment"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Increment this counter by the given value
+ @param incr the value to increase this counter by]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A counter record, comprising its name and value.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Counter -->
+ <!-- start class org.apache.hadoop.mapred.Counters.Group -->
+ <class name="Counters.Group" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"/>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the raw name of the group. This is the name of the enum class
+ for this group of counters.]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns localized name of the group. This is the same as getName() by
+ default, but different if an appropriate ResourceBundle is found.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="counterName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the specified counter, or 0 if the counter does
+ not exist.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getCounter(String)} instead">
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given id and create it if it doesn't exist.
+ @param id the numeric id of the counter within the group
+ @param name the internal counter name
+ @return the counter
+ @deprecated use {@link #getCounter(String)} instead]]>
+ </doc>
+ </method>
+ <method name="getCounterForName" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given name and create it if it doesn't exist.
+ @param name the internal counter name
+ @return the counter]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of counters in this group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+      <![CDATA[<code>Group</code> of counters, comprising counters from a particular
+      counter {@link Enum} class.
+
+      <p><code>Group</code> handles localization of the class name and the
+ counter names.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Group -->
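+  <!-- Illustrative sketch: Counters and Counters.Group are both Iterable, so two
+       nested loops visit every counter; counters is an existing Counters object.
+
+         for (Counters.Group group : counters) {
+           for (Counters.Counter c : group) {
+             System.out.println(group.getDisplayName() + "."
+                 + c.getDisplayName() + " = " + c.getCounter());
+           }
+         }
+  -->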
+ <!-- start class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <class name="DefaultJobHistoryParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DefaultJobHistoryParser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseJobTasks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobHistoryFile" type="java.lang.String"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobHistory.JobInfo"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Populates a JobInfo object from the job's history log file.
+ @param jobHistoryFile history file for this job.
+      @param job a pre-created JobInfo object; must be non-null.
+      @param fs FileSystem where the history file is present.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Default parser for job history files. It creates an object model from
+      a job history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <!-- start class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <class name="FileAlreadyExistsException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileAlreadyExistsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileAlreadyExistsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+      <![CDATA[Used when the target file of an operation already exists and
+      the operation is not configured to overwrite it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <!-- start class org.apache.hadoop.mapred.FileInputFormat -->
+ <class name="FileInputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <constructor name="FileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setMinSplitSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="minSplitSize" type="long"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="filename" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+      <![CDATA[Is the given filename splitable? Usually true, but if the file is
+ stream compressed, it will not be.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split-up
+ so that {@link Mapper}s process entire files.
+
+ @param fs the file system that the file is on
+ @param filename the file name to check
+ @return is this file splitable?]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setInputPathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="filter" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.PathFilter&gt;"/>
+ <doc>
+ <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+
+ @param filter the PathFilter class use for filtering the input paths.]]>
+ </doc>
+ </method>
+ <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get a PathFilter instance of the filter set for the input paths.
+
+      @return the PathFilter instance set for the job, or null if none has been set.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression.
+
+ @param job the job to list input paths for
+ @return array of FileStatus objects
+ @throws IOException if zero items.]]>
+ </doc>
+ </method>
+ <method name="listPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="Use {@link #listStatus(JobConf)} instead.">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression.
+
+ @param job the job to list input paths for
+ @return array of Path objects
+ @throws IOException if zero items.
+ @deprecated Use {@link #listStatus(JobConf)} instead.]]>
+ </doc>
+ </method>
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Splits files returned by {@link #listStatus(JobConf)} when
+ they're too big.]]>
+ </doc>
+ </method>
+ <method name="computeSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="goalSize" type="long"/>
+ <param name="minSize" type="long"/>
+ <param name="blockSize" type="long"/>
+ </method>
+ <method name="getBlockIndex" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+ <param name="offset" type="long"/>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the given comma separated paths as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be set as
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add the given comma separated paths to the list of inputs for
+ the map-reduce job.
+
+ @param conf The configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be added to
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job.
+ @param inputPaths the {@link Path}s of the input directories/files
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @param conf The configuration of the job
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class for file-based {@link InputFormat}.
+
+ <p><code>FileInputFormat</code> is the base class for all file-based
+ <code>InputFormat</code>s. This provides a generic implementation of
+ {@link #getSplits(JobConf, int)}.
+ Subclasses of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(FileSystem, Path)} method to ensure input-files are
+ not split-up and are processed as a whole by {@link Mapper}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileInputFormat -->
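+  <!-- Illustrative sketch: wiring input paths with the static helpers above, plus
+       a subclass that refuses to split so each file is processed whole. The paths
+       and WholeFileInputFormat are hypothetical; TextInputFormat is one concrete
+       FileInputFormat.
+
+         JobConf job = new JobConf(conf);
+         FileInputFormat.setInputPaths(job, new Path("/user/data/in"));
+         FileInputFormat.addInputPath(job, new Path("/user/data/extra"));
+
+         public class WholeFileInputFormat extends TextInputFormat {
+           @Override
+           protected boolean isSplitable(FileSystem fs, Path file) {
+             return false;   // each file becomes exactly one split
+           }
+         }
+  -->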
+ <!-- start class org.apache.hadoop.mapred.FileOutputFormat -->
+ <class name="FileOutputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="FileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param conf The configuration of the job.
+ @param outputDir the {@link Path} of the output directory for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(JobConf)]]>
+ </doc>
+ </method>
+ <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the task's temporary output directory
+ for the map-reduce job
+
+ <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
+
+ <p>Some applications need to create/write-to side-files, which differ from
+ the actual job-outputs.
+
+ <p>In such cases there could be issues with 2 instances of the same TIP
+ (running simultaneously e.g. speculative tasks) trying to open/write-to the
+ same file (path) on HDFS. Hence the application-writer will have to pick
+ unique names per task-attempt (e.g. using the attemptid, say
+ <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
+
+ <p>To get around this the Map-Reduce framework helps the application-writer
+ out by maintaining a special
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>
+ sub-directory for each task-attempt on HDFS where the output of the
+ task-attempt goes. On successful completion of the task-attempt the files
+ in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only)
+ are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the
+ framework discards the sub-directory of unsuccessful task-attempts. This
+ is completely transparent to the application.</p>
+
+ <p>The application-writer can take advantage of this by creating any
+ side-files required in <tt>${mapred.work.output.dir}</tt> during execution
+ of a reduce-task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the
+ framework will move them out similarly - thus the application-writer
+ doesn't have to pick unique paths per task-attempt.</p>
+
+ <p><i>Note</i>: the value of <tt>${mapred.work.output.dir}</tt> during
+ execution of a particular task-attempt is actually
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>, and this value is
+ set by the map-reduce framework. So, just create any side-files in the
+ path returned by {@link #getWorkOutputPath(JobConf)} from map/reduce
+ task to take advantage of this feature.</p>
+
+ <p>The entire discussion holds true for maps of jobs with
+ reducer=NONE (i.e. 0 reduces), since the output of the map, in that case,
+ goes directly to HDFS.</p>
+
+ @return the {@link Path} to the task's temporary output directory
+ for the map-reduce job.]]>
+ </doc>
+ </method>
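+ <!--
+ A sketch of the side-effect file pattern described above, run from inside
+ a map or reduce task (the file name "side-data" is an assumption):
+
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.mapred.FileOutputFormat;
+ import org.apache.hadoop.mapred.JobConf;
+
+ // job is the task's JobConf. The returned path points into the
+ // task-attempt's private ${mapred.output.dir}/_temporary/_${taskid}
+ // directory, so no per-attempt name mangling is needed.
+ Path workDir = FileOutputFormat.getWorkOutputPath(job);
+ Path sideFile = new Path(workDir, "side-data");
+ FSDataOutputStream out = FileSystem.get(job).create(sideFile);
+ // ... write side-effect data ...
+ out.close();
+ // On successful completion of the task-attempt, the framework promotes
+ // the file to ${mapred.output.dir} automatically.
+ -->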
+ <method name="getTaskOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to create the task's temporary output directory and
+ return the path to the task's output file.
+
+ @param conf job-configuration
+ @param name temporary task-output filename
+ @return path to the task's temporary output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base class for {@link OutputFormat}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.FileSplit -->
+ <class name="FileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[Constructs a split.
+ @deprecated
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process]]>
+ </doc>
+ </constructor>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+ </doc>
+ </constructor>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file containing this split's data.]]>
+ </doc>
+ </method>
+ <method name="getStart" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The position of the first byte in the file to process.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the file to process.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A section of an input file. Returned by {@link
+ InputFormat#getSplits(JobConf, int)} and passed to
+ {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileSplit -->
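+ <!--
+ A minimal sketch of constructing a FileSplit by hand (the path, offsets
+ and host list are assumptions; normally InputFormat#getSplits builds
+ these):
+
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.mapred.FileSplit;
+
+ // First 64 MB of the file, with one replica location hint.
+ FileSplit split = new FileSplit(new Path("/data/input.txt"),
+     0L, 64 * 1024 * 1024L,
+     new String[] { "datanode1" });
+ long length = split.getLength(); // 67108864
+ -->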
+ <!-- start class org.apache.hadoop.mapred.ID -->
+ <class name="ID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.mapred.ID&gt;"/>
+ <constructor name="ID" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an ID object from the given int.]]>
+ </doc>
+ </constructor>
+ <constructor name="ID"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the int which represents the identifier.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare IDs by associated numbers]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.ID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.ID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct an ID object from the given string.
+
+ @return constructed Id object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A general identifier, which internally stores the id
+ as an integer. This is the super class of {@link JobID},
+ {@link TaskID} and {@link TaskAttemptID}.
+
+ @see JobID
+ @see TaskID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ID -->
+ <!-- start interface org.apache.hadoop.mapred.InputFormat -->
+ <interface name="InputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="getSplits is called in the client and can perform any
+ necessary validation of the input">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check for validity of the input-specification for the job.
+
+ <p>This method is used to validate the input directories when a job is
+ submitted so that the {@link JobClient} can fail early, with a useful
+ error message, in case of errors, e.g. if the input directory does not exist.
+ </p>
+
+ @param job job configuration.
+ @throws InvalidInputException if the job does not have valid input
+ @deprecated getSplits is called in the client and can perform any
+ necessary validation of the input]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically split the set of input files for the job.
+
+ <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
+ for processing.</p>
+
+ <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
+ input files are not physically split into chunks. E.g. a split could
+ be an <i>&lt;input-file-path, start, offset&gt;</i> tuple.
+
+ @param job job configuration.
+ @param numSplits the desired number of splits, a hint.
+ @return an array of {@link InputSplit}s for the job.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordReader} for the given {@link InputSplit}.
+
+ <p>It is the responsibility of the <code>RecordReader</code> to respect
+ record boundaries while processing the logical split to present a
+ record-oriented view to the individual task.</p>
+
+ @param split the {@link InputSplit}
+ @param job the job that this split belongs to
+ @return a {@link RecordReader}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputFormat</code> describes the input-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the input-specification of the job.
+ </li>
+ <li>
+ Split-up the input file(s) into logical {@link InputSplit}s, each of
+ which is then assigned to an individual {@link Mapper}.
+ </li>
+ <li>
+ Provide the {@link RecordReader} implementation to be used to glean
+ input records from the logical <code>InputSplit</code> for processing by
+ the {@link Mapper}.
+ </li>
+ </ol>
+
+ <p>The default behavior of file-based {@link InputFormat}s, typically
+ sub-classes of {@link FileInputFormat}, is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of the input files. However, the {@link FileSystem} blocksize of
+ the input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Clearly, logical splits based on input-size are insufficient for many
+ applications since record boundaries must be respected. In such cases, the
+ application also has to implement a {@link RecordReader}, on which lies the
+ responsibility to respect record-boundaries and present a record-oriented
+ view of the logical <code>InputSplit</code> to the individual task.
+
+ @see InputSplit
+ @see RecordReader
+ @see JobClient
+ @see FileInputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputFormat -->
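+ <!--
+ A sketch of wiring an InputFormat into a job (TextInputFormat and the
+ input path are assumptions for illustration):
+
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.mapred.FileInputFormat;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.TextInputFormat;
+
+ JobConf conf = new JobConf();
+ // The framework will call getSplits() and getRecordReader() on this class.
+ conf.setInputFormat(TextInputFormat.class);
+ FileInputFormat.setInputPaths(conf, new Path("in"));
+ -->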
+ <!-- start interface org.apache.hadoop.mapred.InputSplit -->
+ <interface name="InputSplit" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the total number of bytes in the data of the <code>InputSplit</code>.
+
+ @return the number of bytes in the input split.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hostnames where the input split is located.
+
+ @return list of hostnames where data of the <code>InputSplit</code> is
+ located as an array of <code>String</code>s.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputSplit</code> represents the data to be processed by an
+ individual {@link Mapper}.
+
+ <p>Typically, it presents a byte-oriented view of the input, and it is the
+ responsibility of the {@link RecordReader} of the job to process this and
+ present a record-oriented view.
+
+ @see InputFormat
+ @see RecordReader]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputSplit -->
+ <!-- start class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <class name="InvalidFileTypeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidFileTypeException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidFileTypeException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Used when the file type differs from the desired file type,
+ e.g. getting a file when a directory is expected.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidInputException -->
+ <class name="InvalidInputException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidInputException" type="java.util.List&lt;java.io.IOException&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create the exception with the given list.
+ @param probs the list of problems to report. This list is not copied.]]>
+ </doc>
+ </constructor>
+ <method name="getProblems" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete list of the problems reported.
+ @return the list of problems, which must not be modified]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get a summary message of the problems found.
+ @return the concatenated messages from all of the problems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class wraps a list of problems with the input, so that the user
+ can get a list of problems together instead of finding and fixing them one
+ by one.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidInputException -->
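+ <!--
+ A sketch of reporting every input problem at once, as described above
+ (job is an assumed, fully configured JobConf):
+
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.InvalidInputException;
+ import org.apache.hadoop.mapred.JobClient;
+
+ try {
+   JobClient.runJob(job);
+ } catch (InvalidInputException e) {
+   // One pass over all collected problems instead of fix-one-retry loops.
+   for (IOException problem : e.getProblems()) {
+     System.err.println(problem.getMessage());
+   }
+ } catch (IOException e) {
+   e.printStackTrace();
+ }
+ -->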
+ <!-- start class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <class name="InvalidJobConfException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidJobConfException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidJobConfException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when the jobconf is missing some mandatory
+ attributes, or when the value of some attributes is invalid.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <!-- start class org.apache.hadoop.mapred.IsolationRunner -->
+ <class name="IsolationRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IsolationRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Run a single task
+ @param args the first argument is the task directory]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.IsolationRunner -->
+ <!-- start class org.apache.hadoop.mapred.JobClient -->
+ <class name="JobClient" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobClient"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job client.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client with the given {@link JobConf}, and connect to the
+ default {@link JobTracker}.
+
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client, connect to the indicated job tracker.
+
+ @param jobTrackAddr the job tracker to connect to.
+ @param conf configuration.]]>
+ </doc>
+ </constructor>
+ <method name="getCommandLineConfig" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the command line configuration.]]>
+ </doc>
+ </method>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Connect to the default {@link JobTracker}.
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the <code>JobClient</code>.]]>
+ </doc>
+ </method>
+ <method name="getFs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a filesystem handle. We need this to prepare jobs
+ for submission to the MapReduce system.
+
+ @return the filesystem handle.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobFile" type="java.lang.String"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param jobFile the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param job the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a {@link RunningJob} object to track an ongoing job. Returns
+ null if the id does not correspond to any known job.
+
+ @param jobid the jobid of the job.
+ @return the {@link RunningJob} handle to track the job, null if the
+ <code>jobid</code> doesn't correspond to any known job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getJob(JobID)}.">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getJob(JobID)}.]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the map tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the map tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getMapTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getMapTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the reduce tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the reduce tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getReduceTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getReduceTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the Map-Reduce cluster.
+
+ @return the status information about the Map-Reduce cluster as an object
+ of {@link ClusterStatus}.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are not completed and not failed.
+
+ @return array of {@link JobStatus} for the running/to-be-run jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are submitted.
+
+ @return array of {@link JobStatus} for the submitted jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Utility that submits a job, then polls for progress until the job is
+ complete.
+
+ @param job the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Sets the output filter for tasks. Only those tasks are printed whose
+ output matches the filter.
+ @param newValue task filter.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the task output filter out of the JobConf.
+
+ @param job the JobConf to examine.
+ @return the filter level.]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Modify the JobConf to set the task output filter.
+
+ @param job the JobConf to modify.
+ @param newValue the value to set.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns task output filter.
+ @return task filter.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getDefaultMaps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Maps in the cluster.
+
+ @return the max available Maps in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDefaultReduces" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Reduces in the cluster.
+
+ @return the max available Reduces in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Grab the jobtracker system directory path where job-specific files are to be placed.
+
+ @return the system directory where job-specific files are to be placed.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[<code>JobClient</code> is the primary interface for the user-job to interact
+ with the {@link JobTracker}.
+
+ <code>JobClient</code> provides facilities to submit jobs, track their
+ progress, access component-tasks' reports/logs, get the Map-Reduce cluster
+ status information etc.
+
+ <p>The job submission process involves:
+ <ol>
+ <li>
+ Checking the input and output specifications of the job.
+ </li>
+ <li>
+ Computing the {@link InputSplit}s for the job.
+ </li>
+ <li>
+ Setup the requisite accounting information for the {@link DistributedCache}
+ of the job, if necessary.
+ </li>
+ <li>
+ Copying the job's jar and configuration to the map-reduce system directory
+ on the distributed file-system.
+ </li>
+ <li>
+ Submitting the job to the <code>JobTracker</code> and optionally monitoring
+ its status.
+ </li>
+ </ol></p>
+
+ Normally the user creates the application, describes various facets of the
+ job via {@link JobConf} and then uses the <code>JobClient</code> to submit
+ the job and monitor its progress.
+
+ <p>Here is an example on how to use <code>JobClient</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ job.setInputPath(new Path("in"));
+ job.setOutputPath(new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ </pre></blockquote></p>
+
+ <h4 id="JobControl">Job Control</h4>
+
+ <p>At times clients would chain map-reduce jobs to accomplish complex tasks
+ which cannot be done via a single map-reduce job. This is fairly easy since
+ the output of the job, typically, goes to the distributed file-system, and that
+ can be used as the input for the next job.</p>
+
+ <p>However, this also means that the onus on ensuring jobs are complete
+ (success/failure) lies squarely on the clients. In such situations the
+ various job-control options are:
+ <ol>
+ <li>
+ {@link #runJob(JobConf)} : submits the job and returns only after
+ the job has completed.
+ </li>
+ <li>
+ {@link #submitJob(JobConf)} : only submits the job; the client then polls
+ the returned handle to the {@link RunningJob} to query status and make
+ scheduling decisions.
+ </li>
+ <li>
+ {@link JobConf#setJobEndNotificationURI(String)} : setup a notification
+ on job-completion, thus avoiding polling.
+ </li>
+ </ol></p>
+
+ @see JobConf
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient -->
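+ <!--
+ A sketch of the non-blocking job-control option described above
+ (submitJob plus polling; the sleep interval is an arbitrary assumption):
+
+ import org.apache.hadoop.mapred.JobClient;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.RunningJob;
+
+ JobConf job = ...; // assumed, fully configured job
+ JobClient client = new JobClient(job);
+ RunningJob running = client.submitJob(job);
+ while (!running.isComplete()) {
+   // Free to do other scheduling work here instead of blocking in runJob().
+   Thread.sleep(5000);
+ }
+ if (!running.isSuccessful()) {
+   System.err.println("Job failed: " + running.getJobID());
+ }
+ -->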
+ <!-- start class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <class name="JobClient.TaskStatusFilter" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobClient.TaskStatusFilter&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <!-- start class org.apache.hadoop.mapred.JobConf -->
+ <class name="JobConf" extends="org.apache.hadoop.conf.Configuration"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <method name="getJar" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user jar for the map-reduce job.
+
+ @return the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJar"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jar" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user jar for the map-reduce job.
+
+ @param jar the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJarByClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the job's jar file by finding an example class location.
+
+ @param cls the example class.]]>
+ </doc>
+ </method>
+ <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link JobClient#getSystemDir()} instead.
+ Get the system directory where job-specific files are to be placed.">
+ <doc>
+ <![CDATA[@deprecated Use {@link JobClient#getSystemDir()} instead.
+ Get the system directory where job-specific files are to be placed.
+
+ @return the system directory where job-specific files are to be placed.]]>
+ </doc>
+ </method>
+ <method name="getLocalDirs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="subdir" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a local file name. Files are distributed among configured
+ local directories.]]>
+ </doc>
+ </method>
+ <method name="setInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or
+ {@link FileInputFormat#setInputPaths(JobConf, String)}">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the input directory for the map-reduce job.
+
+ @param dir the {@link Path} of the input directory for the map-reduce job.
+ @deprecated Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or
+ {@link FileInputFormat#setInputPaths(JobConf, String)}]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#addInputPath(JobConf, Path)} or
+ {@link FileInputFormat#addInputPaths(JobConf, String)}">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
+ @param dir {@link Path} to be added to the list of inputs for
+ the map-reduce job.
+ @deprecated Use {@link FileInputFormat#addInputPath(JobConf, Path)} or
+ {@link FileInputFormat#addInputPaths(JobConf, String)}]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#getInputPaths(JobConf)}">
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @return the list of input {@link Path}s for the map-reduce job.
+ @deprecated Use {@link FileInputFormat#getInputPaths(JobConf)}]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reported username for this job.
+
+ @return the username]]>
+ </doc>
+ </method>
+ <method name="setUser"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the reported username for this job.
+
+ @param user the username for this job.]]>
+ </doc>
+ </method>
+ <method name="setKeepFailedTaskFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the framework should keep the intermediate files for
+ failed tasks.
+
+ @param keep <code>true</code> if framework should keep the intermediate files
+ for failed tasks, <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="getKeepFailedTaskFiles" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should the temporary files for failed tasks be kept?
+
+ @return should the files be kept?]]>
+ </doc>
+ </method>
+ <method name="setKeepTaskFilesPattern"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pattern" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set a regular expression for task names that should be kept.
+ The regular expression ".*_m_000123_0" would keep the files
+ for the first instance of map 123 that ran.
+
+ @param pattern the java.util.regex.Pattern to match against the
+ task names.]]>
+ </doc>
+ </method>
+ <method name="getKeepTaskFilesPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the regular expression that is matched against the task names
+ to see if we need to keep the files.
+
+ @return the pattern as a string, if it was set, otherwise null.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the default file system.
+
+ @param dir the new current working directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the default file system.
+
+ @return the directory name.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat#getOutputPath(JobConf)} or
+ {@link FileOutputFormat#getWorkOutputPath(JobConf)}
+ Get the {@link Path} to the output directory for the map-reduce job.">
+ <doc>
+ <![CDATA[@deprecated Use {@link FileOutputFormat#getOutputPath(JobConf)} or
+ {@link FileOutputFormat#getWorkOutputPath(JobConf)}
+ Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat#setOutputPath(JobConf, Path)}
+ Set the {@link Path} of the output directory for the map-reduce job.
+
+ &lt;p>&lt;i>Note&lt;/i>:
+ &lt;/p>">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[@deprecated Use {@link FileOutputFormat#setOutputPath(JobConf, Path)}
+ Set the {@link Path} of the output directory for the map-reduce job.
+
+ <p><i>Note</i>:
+ </p>
+ @param dir the {@link Path} of the output directory for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputFormat" return="org.apache.hadoop.mapred.InputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link InputFormat} implementation for the map-reduce job,
+ defaults to {@link TextInputFormat} if not specified explicitly.
+
+ @return the {@link InputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link InputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link InputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="getOutputFormat" return="org.apache.hadoop.mapred.OutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link OutputFormat} implementation for the map-reduce job,
+ defaults to {@link TextOutputFormat} if not specified explicitly.
+
+ @return the {@link OutputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setOutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link OutputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link OutputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="setCompressMapOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Should the map outputs be compressed before transfer?
+ Uses the SequenceFile compression.
+
+ @param compress should the map outputs be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressMapOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Are the outputs of the maps to be compressed?
+
+ @return <code>true</code> if the outputs of the maps are to be compressed,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="{@link CompressionType} is no longer valid for intermediate
+ map-outputs.">
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the map outputs.
+
+ @param style the {@link CompressionType} to control how the map outputs
+ are compressed.
+ @deprecated {@link CompressionType} is no longer valid for intermediate
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="{@link CompressionType} is no longer valid for intermediate
+ map-outputs.">
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the map outputs.
+
+ @return the {@link CompressionType} for map outputs, defaulting to
+ {@link CompressionType#RECORD}.
+ @deprecated {@link CompressionType} is no longer valid for intermediate
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the given class as the {@link CompressionCodec} for the map outputs.
+
+ @param codecClass the {@link CompressionCodec} class that will compress
+ the map outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the map outputs.
+
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} class that should be used to compress the
+ map outputs.
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
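+ <!--
+ A sketch of enabling intermediate (map-output) compression via the
+ methods above (the codec choice is an assumption):
+
+ import org.apache.hadoop.io.compress.DefaultCodec;
+ import org.apache.hadoop.mapred.JobConf;
+
+ JobConf conf = new JobConf();
+ // Compress map outputs before they are shuffled to the reduces.
+ conf.setCompressMapOutput(true);
+ conf.setMapOutputCompressorClass(DefaultCodec.class);
+ -->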
+ <method name="getMapOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the map output data. If it is not set, use the
+ (final) output key class. This allows the map output key class to be
+ different than the final output key class.
+
+ @return the map output key class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the map output data. This allows the user to
+ specify the map output key class to be different than the final output
+ value class.
+
+ @param theClass the map output key class.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for the map output data. If it is not set, use the
+ (final) output value class. This allows the map output value class to be
+ different than the final output value class.
+
+ @return the map output value class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for the map output data. This allows the user to
+ specify the map output value class to be different than the final output
+ value class.
+
+ @param theClass the map output value class.]]>
+ </doc>
+ </method>
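+ <!--
+ A sketch of declaring intermediate types that differ from the final
+ output types, as the methods above allow (the concrete types are
+ assumptions):
+
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.LongWritable;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.JobConf;
+
+ JobConf conf = new JobConf();
+ // Maps emit (Text, IntWritable) ...
+ conf.setMapOutputKeyClass(Text.class);
+ conf.setMapOutputValueClass(IntWritable.class);
+ // ... while the job's final output is (Text, LongWritable).
+ conf.setOutputKeyClass(Text.class);
+ conf.setOutputValueClass(LongWritable.class);
+ -->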
+ <method name="getOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the job output data.
+
+ @return the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the job output data.
+
+ @param theClass the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link RawComparator} comparator used to compare keys.
+
+ @return the {@link RawComparator} comparator used to compare keys.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyComparatorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link RawComparator} comparator used to compare keys.
+
+ @param theClass the {@link RawComparator} comparator used to
+ compare keys.
+ @see #setOutputValueGroupingComparator(Class)]]>
+ </doc>
+ </method>
+ <method name="getOutputValueGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-defined {@link RawComparator} comparator for
+ grouping keys of inputs to the reduce.
+
+ @return comparator set by the user for grouping values.
+ @see #setOutputValueGroupingComparator(Class) for details.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueGroupingComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the user-defined {@link RawComparator} comparator for
+ grouping keys in the input to the reduce.
+
+ <p>This comparator should be provided if the equivalence rules for keys
+ for sorting the intermediates are different from those for grouping keys
+ before each call to
+ {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
+
+ <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
+ in a single call to the reduce function if K1 and K2 compare as equal.</p>
+
+ <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
+ how keys are sorted, this can be used in conjunction to simulate
+ <i>secondary sort on values</i>.</p>
+
+ <p><i>Note</i>: This is not a guarantee of the reduce sort being
+ <i>stable</i> in any sense. (In any case, with the order of available
+ map-outputs to the reduce being non-deterministic, it wouldn't make
+ that much sense.)</p>
+
+ @param theClass the comparator class to be used for grouping keys.
+ It should implement <code>RawComparator</code>.
+ @see #setOutputKeyComparatorClass(Class)]]>
+ </doc>
+ </method>
+ <method name="getOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for job outputs.
+
+ @return the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for job outputs.
+
+ @param theClass the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapperClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Mapper} class for the job.
+
+ @return the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapperClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Mapper} class for the job.
+
+ @param theClass the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getMapRunnerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link MapRunnable} class for the job.
+
+ @return the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapRunnerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"/>
+ <doc>
+ <![CDATA[Expert: Set the {@link MapRunnable} class for the job.
+
+ Typically used to exert greater control on {@link Mapper}s.
+
+ @param theClass the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getPartitionerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Partitioner} used to partition {@link Mapper}-outputs
+ to be sent to the {@link Reducer}s.
+
+ @return the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setPartitionerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Partitioner} class used to partition
+ {@link Mapper}-outputs to be sent to the {@link Reducer}s.
+
+ @param theClass the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getReducerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Reducer} class for the job.
+
+ @return the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setReducerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Reducer} class for the job.
+
+ @param theClass the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getCombinerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers. Typically the combiner is the same as
+ the {@link Reducer} for the job i.e. {@link #getReducerClass()}.
+
+ @return the user-defined combiner class used to combine map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCombinerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers.
+
+ <p>The combiner is a task-level aggregation operation which, in some cases,
+ helps to cut down the amount of data transferred from the {@link Mapper} to
+ the {@link Reducer}, leading to better performance.</p>
+
+ <p>Typically the combiner is the same as the <code>Reducer</code> for the
+ job i.e. {@link #setReducerClass(Class)}.</p>
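+
+ <p>For example, a word-count style job can reuse its reducer as the
+ combiner, since summing partial counts is associative and commutative
+ (a sketch, assuming an existing <code>JobConf job</code> and a
+ hypothetical <code>WordCountReducer</code>):</p>
+ <p><blockquote><pre>
+ job.setCombinerClass(WordCountReducer.class);
+ job.setReducerClass(WordCountReducer.class);
+ </pre></blockquote></p>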
+
+ @param theClass the user-defined combiner class used to combine
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCombineOnceOnly"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[If true, ensures the combiner is run once and only once on output from
+ the map. Otherwise, the combiner may be run zero or more times.]]>
+ </doc>
+ </method>
+ <method name="getCombineOnceOnly" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be used for this
+ job, <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job.
+
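+ <p>A small sketch (assuming an existing <code>JobConf job</code>): turn
+ speculation off job-wide, then adjust maps and reduces independently with
+ the phase-specific setters below:</p>
+ <p><blockquote><pre>
+ job.setSpeculativeExecution(false);    // off for both phases
+ job.setMapSpeculativeExecution(true);  // back on for maps only
+ </pre></blockquote></p>
+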
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on, else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getMapSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for map tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be
+ used for this job for map tasks,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for map tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for map tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getReduceSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for reduce tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be used
+ for reduce tasks for this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setReduceSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for reduce tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for reduce tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getNumMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of map tasks for this job.
+ Defaults to <code>1</code>.
+
+ @return the number of map tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumMapTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the number of map tasks for this job.
+
+ <p><i>Note</i>: This is only a <i>hint</i> to the framework. The actual
+ number of spawned map tasks depends on the number of {@link InputSplit}s
+ generated by the job's {@link InputFormat#getSplits(JobConf, int)}.
+
+ A custom {@link InputFormat} is typically used to accurately control
+ the number of map tasks for the job.</p>
+
+ <h4 id="NoOfMaps">How many maps?</h4>
+
+ <p>The number of maps is usually driven by the total size of the inputs
+ i.e. total number of blocks of the input files.</p>
+
+ <p>The right level of parallelism for maps seems to be around 10-100 maps
+ per node, although it has been set as high as 300 or so for very CPU-light
+ map tasks. Task setup takes a while, so it is best if the maps take at
+ least a minute to execute.</p>
+
+ <p>The default behavior of file-based {@link InputFormat}s is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of input files. However, the {@link FileSystem} blocksize of the
+ input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB,
+ you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is
+ used to set it even higher.</p>
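+
+ <p>A quick sketch of the arithmetic behind the 10TB example above
+ (assuming an existing <code>JobConf job</code>):</p>
+ <p><blockquote><pre>
+ long inputBytes = 10L * 1024 * 1024 * 1024 * 1024; // 10TB of input
+ long blockSize  = 128L * 1024 * 1024;              // 128MB block size
+ int maps = (int) (inputBytes / blockSize);         // 81,920 splits
+ job.setNumMapTasks(maps);  // a hint only; the InputFormat has the final say
+ </pre></blockquote></p>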
+
+ @param n the number of map tasks for this job.
+ @see InputFormat#getSplits(JobConf, int)
+ @see FileInputFormat
+ @see FileSystem#getDefaultBlockSize()
+ @see FileStatus#getBlockSize()]]>
+ </doc>
+ </method>
+ <method name="getNumReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of reduce tasks for this job. Defaults to
+ <code>1</code>.
+
+ @return the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumReduceTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the requisite number of reduce tasks for this job.
+
+ <h4 id="NoOfReduces">How many reduces?</h4>
+
+ <p>The right number of reduces seems to be <code>0.95</code> or
+ <code>1.75</code> multiplied by (&lt;<i>no. of nodes</i>&gt; *
+ <a href="{@docRoot}/../hadoop-default.html#mapred.tasktracker.reduce.tasks.maximum">
+ mapred.tasktracker.reduce.tasks.maximum</a>).
+ </p>
+
+ <p>With <code>0.95</code> all of the reduces can launch immediately and
+ start transferring map outputs as the maps finish. With <code>1.75</code>
+ the faster nodes will finish their first round of reduces and launch a
+ second wave of reduces, doing a much better job of load balancing.</p>
+
+ <p>Increasing the number of reduces increases the framework overhead, but
+ improves load balancing and lowers the cost of failures.</p>
+
+ <p>The scaling factors above are slightly less than whole numbers to
+ reserve a few reduce slots in the framework for speculative-tasks, failures
+ etc.</p>
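+
+ <p>A sketch of the <code>0.95</code> heuristic (assuming an existing
+ <code>JobConf job</code> and illustrative cluster numbers):</p>
+ <p><blockquote><pre>
+ int nodes = 100;             // assumed cluster size
+ int reduceSlotsPerNode = 2;  // mapred.tasktracker.reduce.tasks.maximum
+ job.setNumReduceTasks((int) (0.95 * nodes * reduceSlotsPerNode)); // 190
+ </pre></blockquote></p>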
+
+ <h4 id="ReducerNone">Reducer NONE</h4>
+
+ <p>It is legal to set the number of reduce-tasks to <code>zero</code>.</p>
+
+ <p>In this case the output of the map-tasks goes directly to the
+ distributed file-system, to the path set by
+ {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the
+ framework doesn't sort the map-outputs before writing them out to HDFS.</p>
+
+ @param n the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured maximum number of attempts that will be made to run
+ a map task, as specified by the <code>mapred.map.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the maximum number of attempts that will be made to run a
+ map task.
+
+ @param n the number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured maximum number of attempts that will be made to run
+ a reduce task, as specified by the <code>mapred.reduce.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the maximum number of attempts that will be made to run a
+ reduce task.
+
+ @param n the number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name. This is only used to identify the
+ job to the user.
+
+ @return the job's name, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified job name.
+
+ @param name the job's new name.]]>
+ </doc>
+ </method>
+ <method name="getSessionId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified session identifier. The default is the empty string.
+
+ The session identifier is used to tag metric data that is reported to some
+ performance metrics system via the org.apache.hadoop.metrics API. The
+ session identifier is intended, in particular, for use by Hadoop-On-Demand
+ (HOD) which allocates a virtual Hadoop cluster dynamically and transiently.
+ HOD will set the session identifier by modifying the hadoop-site.xml file
+ before starting the cluster.
+
+ When not running under HOD, this identifier is expected to remain set to
+ the empty string.
+
+ @return the session identifier, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setSessionId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sessionId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified session identifier.
+
+ @param sessionId the new session id.]]>
+ </doc>
+ </method>
+ <method name="setMaxTaskFailuresPerTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="noFailures" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds <code>noFailures</code>, the
+ tasktracker is <i>blacklisted</i> for this job.
+
+ @param noFailures maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxTaskFailuresPerTracker" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Expert: Get the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds this, the tasktracker is
+ <i>blacklisted</i> for this job.
+
+ @return the maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of map tasks that can fail without
+ the job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed map-task results in
+ the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the maximum percentage of map tasks that can fail without the
+ job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts} attempts
+ before being declared as <i>failed</i>.
+
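+ <p>For instance, to tolerate up to 5% failed map tasks before the whole
+ job is failed (a sketch, assuming an existing <code>JobConf job</code>):</p>
+ <p><blockquote><pre>
+ job.setMaxMapTaskFailuresPercent(5);
+ </pre></blockquote></p>
+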
+ @param percent the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of reduce tasks that can fail without
+ the job being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed reduce-task results
+ in the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum percentage of reduce tasks that can fail without the job
+ being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="prio" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Set {@link JobPriority} for this job.
+
+ @param prio the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link JobPriority} for this job.
+
+ @return the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getProfileEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get whether task profiling is enabled.
+ @return true if some tasks will be profiled]]>
+ </doc>
+ </method>
+ <method name="setProfileEnabled"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the system should collect profiler information for some
+ of the tasks in this job. The information is stored in the user log
+ directory.
+ @param newValue true means it should be gathered]]>
+ </doc>
+ </method>
+ <method name="getProfileParams" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the profiler configuration arguments.
+
+ The default value for this property is
+ "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
+
+ @return the parameters to pass to the task child to configure profiling]]>
+ </doc>
+ </method>
+ <method name="setProfileParams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the profiler configuration arguments. If the string contains a '%s' it
+ will be replaced with the name of the profiling output file when the task
+ runs.
+
+ This value is passed to the task child JVM on the command line.
+
+ @param value the configuration string]]>
+ </doc>
+ </method>
+ <method name="getProfileTaskRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <doc>
+ <![CDATA[Get the range of maps or reduces to profile.
+ @param isMap is the task a map?
+ @return the task ranges]]>
+ </doc>
+ </method>
+ <method name="setProfileTaskRange"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <param name="newValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the ranges of maps or reduces to profile. setProfileEnabled(true)
+ must also be called for the ranges to take effect.
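+
+ <p>A minimal sketch (assuming an existing <code>JobConf job</code>) that
+ profiles the first three map tasks and the first reduce task:</p>
+ <p><blockquote><pre>
+ job.setProfileEnabled(true);
+ job.setProfileTaskRange(true, "0-2"); // map task ids 0, 1 and 2
+ job.setProfileTaskRange(false, "0");  // reduce task id 0
+ </pre></blockquote></p>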
+ @param newValue a set of integer ranges of the task ids]]>
+ </doc>
+ </method>
+ <method name="setMapDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the map tasks fail.
+
+ <p>The debug script can aid debugging of failed map tasks. The script is
+ given the task's stdout, stderr, syslog and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the map failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script needs to be symlinked. </p>
+
+ <p>Here is an example of how to submit a script:</p>
+ <p><blockquote><pre>
+ job.setMapDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param mDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getMapDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the map task's debug script.
+
+ @return the debug Script for the mapred job for failed map tasks.
+ @see #setMapDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="setReduceDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the reduce tasks fail.
+
+ <p>The debug script can aid debugging of failed reduce tasks. The script
+ is given the task's stdout, stderr, syslog and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the reduce failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script file needs to be symlinked.</p>
+
+ <p>Here is an example of how to submit a script:</p>
+ <p><blockquote><pre>
+ job.setReduceDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param rDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getReduceDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reduce task's debug Script
+
+ @return the debug script for the mapred job for failed reduce tasks.
+ @see #setReduceDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="getJobEndNotificationURI" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ @return the job end notification uri, <code>null</code> if it hasn't
+ been set.
+ @see #setJobEndNotificationURI(String)]]>
+ </doc>
+ </method>
+ <method name="setJobEndNotificationURI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ <p>The uri can contain 2 special parameters: <tt>$jobId</tt> and
+ <tt>$jobStatus</tt>. Those, if present, are replaced by the job's
+ identifier and completion-status respectively.</p>
+
+ <p>This is typically used by application-writers to implement chaining of
+ Map-Reduce jobs in an <i>asynchronous manner</i>.</p>
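+
+ <p>For example (a sketch, assuming an existing <code>JobConf job</code> and
+ a hypothetical notification endpoint on <code>myhost</code>):</p>
+ <p><blockquote><pre>
+ // $jobId and $jobStatus are substituted by the framework on completion.
+ job.setJobEndNotificationURI(
+     "http://myhost:8080/notify?jobid=$jobId&status=$jobStatus");
+ </pre></blockquote></p>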
+
+ @param uri the job end notification uri
+ @see JobStatus
+ @see <a href="{@docRoot}/org/apache/hadoop/mapred/JobClient.html#JobCompletionAndChaining">Job Completion and Chaining</a>]]>
+ </doc>
+ </method>
+ <method name="getJobLocalDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job-specific shared directory for use as scratch space.
+
+ <p>
+ When a job starts, a shared directory is created at
+ <code>${mapred.local.dir}/taskTracker/jobcache/$jobid/work/</code>.
+ This directory is exposed to the user through the configuration property
+ <code>job.local.dir</code>, so tasks can use it as scratch space and
+ share files among themselves.</p>
+ This value is also available as a System property.
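+
+ <p>A small sketch of reading it from inside a running task (assuming an
+ existing <code>JobConf job</code>); both lookups name the same localized
+ directory:</p>
+ <p><blockquote><pre>
+ String scratch = job.get("job.local.dir");
+ String sameDir = System.getProperty("job.local.dir");
+ </pre></blockquote></p>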
+
+ @return The localized job specific shared directory]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A map/reduce job configuration.
+
+ <p><code>JobConf</code> is the primary interface for a user to describe a
+ map-reduce job to the Hadoop framework for execution. The framework tries to
+ faithfully execute the job as described by <code>JobConf</code>; however:
+ <ol>
+ <li>
+ Some configuration parameters might have been marked as
+ <a href="{@docRoot}/org/apache/hadoop/conf/Configuration.html#FinalParams">
+ final</a> by administrators and hence cannot be altered.
+ </li>
+ <li>
+ While some job parameters are straightforward to set
+ (e.g. {@link #setNumReduceTasks(int)}), some parameters interact subtly with
+ the rest of the framework and/or the job configuration and are relatively
+ more complex for the user to control finely (e.g. {@link #setNumMapTasks(int)}).
+ </li>
+ </ol></p>
+
+ <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner
+ (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and
+ {@link OutputFormat} implementations to be used, etc.</p>
+
+ <p>Optionally <code>JobConf</code> is used to specify other advanced facets
+ of the job such as the <code>Comparator</code>s to be used, files to be put
+ in the {@link DistributedCache}, whether or not intermediate and/or job
+ outputs are to be compressed (and how), and debuggability via user-provided
+ scripts ({@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)})
+ for post-processing task logs: the task's stdout, stderr and syslog.</p>
+
+ <p>Here is an example on how to configure a job via <code>JobConf</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ FileInputFormat.setInputPaths(job, new Path("in"));
+ FileOutputFormat.setOutputPath(job, new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setCombinerClass(MyJob.MyReducer.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ job.setInputFormat(SequenceFileInputFormat.class);
+ job.setOutputFormat(SequenceFileOutputFormat.class);
+ </pre></blockquote></p>
+
+ @see JobClient
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobConf -->
+ <!-- start interface org.apache.hadoop.mapred.JobConfigurable -->
+ <interface name="JobConfigurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Initializes a new instance from a {@link JobConf}.
+
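+ <p>A minimal sketch of a mapper that reads a job setting in
+ <code>configure</code> (the property name <code>myjob.threshold</code> is
+ made up for illustration):</p>
+ <p><blockquote><pre>
+ public class ThresholdMapper extends MapReduceBase
+     implements Mapper&lt;LongWritable, Text, Text, IntWritable&gt; {
+   private int threshold;
+   public void configure(JobConf job) {
+     threshold = job.getInt("myjob.threshold", 10);
+   }
+   public void map(LongWritable key, Text value,
+                   OutputCollector&lt;Text, IntWritable&gt; out, Reporter reporter)
+       throws IOException {
+     if (value.getLength() &gt; threshold) {
+       out.collect(value, new IntWritable(1));
+     }
+   }
+ }
+ </pre></blockquote></p>
+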
+ @param job the configuration]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[That which may be configured.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobConfigurable -->
+ <!-- start class org.apache.hadoop.mapred.JobEndNotifier -->
+ <class name="JobEndNotifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobEndNotifier"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="startNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="stopNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="registerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ <method name="localRunnerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobEndNotifier -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory -->
+ <class name="JobHistory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="hostname" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Initialize JobHistory files.
+ @param conf JobConf of the job tracker.
+ @param hostname the jobtracker's hostname
+ @return true if initialized properly,
+ false otherwise]]>
+ </doc>
+ </method>
+ <method name="parseHistoryFromFS"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="l" type="org.apache.hadoop.mapred.JobHistory.Listener"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parses a history file and invokes Listener.handle() for
+ each line of history. It can be used to look through history
+ files for specific items without having to keep the whole history in memory.
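+
+ <p>A minimal sketch (assuming a history file path <code>historyFile</code>
+ and a <code>FileSystem</code> handle <code>fs</code> already exist):</p>
+ <p><blockquote><pre>
+ JobHistory.parseHistoryFromFS(historyFile, new JobHistory.Listener() {
+   public void handle(JobHistory.RecordTypes recType,
+                      Map&lt;JobHistory.Keys, String&gt; values) throws IOException {
+     if (recType == JobHistory.RecordTypes.Job) {
+       System.out.println(values.get(JobHistory.Keys.JOBID));
+     }
+   }
+ }, fs);
+ </pre></blockquote></p>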
+ @param path path to history file
+ @param l Listener for history events
+ @param fs FileSystem where history file is present
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isDisableHistory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the history disabled status. By default history is enabled,
+ so this method returns false.
+ @return true if history logging is disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="setDisableHistory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="disableHistory" type="boolean"/>
+ <doc>
+ <![CDATA[Enable/disable history logging. The default value is false, so
+ history is enabled by default.
+ @param disableHistory true if history should be disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <field name="JOBTRACKER_START_TIME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provides methods for writing to and reading from job history.
+ Job history works in an append mode; JobHistory and its inner classes provide
+ methods to log job events.
+
+ JobHistory is split into multiple files, and the format of each file is plain
+ text where each line is of the format [type (key=value)*], where type
+ identifies the type of the record.
+ Type maps to the UID of one of the inner classes of this class.
+
+ Job history is maintained in a master index which contains start/stop times of
+ all jobs with a few other job-level properties. Apart from this, each job's
+ history is maintained in a separate history file. The name of each job history
+ file follows the format jobtrackerId_jobid.
+
+ For parsing the job history it supports a listener-based interface where each
+ line is parsed and passed to a listener. The listener can create an object
+ model of the history or look for specific events and discard the rest of the
+ history.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <class name="JobHistory.HistoryCleaner" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobHistory.HistoryCleaner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Cleans up history data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Deletes history files older than one month, updates the master index
+ to remove all jobs older than one month, and removes the reference to any job
+ tracker that has had no jobs in the last month.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <class name="JobHistory.JobInfo" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.JobInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new JobInfo.]]>
+ </doc>
+ </constructor>
+ <method name="getAllTasks" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.Task&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all map and reduce tasks as a &lt;taskid, Task&gt; map.]]>
+ </doc>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Get the path of the locally stored job file
+ @param jobId id of the job
+ @return the path of the job file on the local file system]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFile" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the path of the job-history
+ log file.
+
+ @param logFile path of the job-history file
+ @return URL encoded path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL encoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="decodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to decode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL decoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logSubmitted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="jobConfPath" type="java.lang.String"/>
+ <param name="submitTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="logSubmitted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="jobConfPath" type="java.lang.String"/>
+ <param name="submitTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Log a job submitted event to history. Creates a new file in history
+ for the job. If history file creation fails, history is disabled
+ for all subsequent events.
+ @param jobId job id assigned by job tracker.
+ @param jobConf job conf of the job
+ @param jobConfPath path to job conf xml file in HDFS.
+ @param submitTime time when job tracker received the job
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs launch time of job.
+ @param jobId job id, assigned by jobtracker.
+ @param startTime start time of job.
+ @param totalMaps total maps assigned by jobtracker.
+ @param totalReduces total reduces.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <param name="failedMaps" type="int"/>
+ <param name="failedReduces" type="int"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="finishTime" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <param name="failedMaps" type="int"/>
+ <param name="failedReduces" type="int"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log job finished. Closes the job file in history.
+ @param jobId job id, assigned by jobtracker.
+ @param finishTime finish time of job in ms.
+ @param finishedMaps no. of maps successfully finished.
+ @param finishedReduces no. of reduces finished successfully.
+ @param failedMaps no. of failed map tasks.
+ @param failedReduces no. of failed reduce tasks.
+ @param counters the counters from the job]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs job failed event. Closes the job history log file.
+ @param jobid job id
+ @param timestamp time when job failure was detected in ms.
+ @param finishedMaps no. of finished map tasks.
+ @param finishedReduces no. of finished reduce tasks.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to job start, finish or failure.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <class name="JobHistory.Keys" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Keys&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Keys[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Keys"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Job history files contain key="value" pairs, where keys belong to this enum.
+ It acts as a global namespace for all keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <!-- start interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <interface name="JobHistory.Listener" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="handle"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recType" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"/>
+ <param name="values" type="java.util.Map&lt;org.apache.hadoop.mapred.JobHistory.Keys, java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Callback method for the history parser.
+ @param recType type of record, which is the first entry in the line.
+ @param values a map of key-value pairs as they appear in history.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Callback interface for reading back log events from JobHistory. This
+ interface should be implemented and passed to
+ JobHistory.parseHistoryFromFS().]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
+ <class name="JobHistory.MapAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.MapAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of this map task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time of task attempt as reported by task tracker.
+ @param hostName host name of the task attempt.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="host" type="java.lang.String"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finish time of map task attempt.
+ @param taskAttemptId task attempt id
+ @param finishTime finish time
+ @param hostName host name]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="host" type="java.lang.String"/>
+ <param name="err" type="java.lang.String"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt failed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt killed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to the start,
+ finish or failure of a Map Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <class name="JobHistory.RecordTypes" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.RecordTypes&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.RecordTypes[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Record types are identifiers for each line of log in history files.
+ A record type appears as the first token in a single line of log.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
+ <class name="JobHistory.ReduceAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.ReduceAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of Reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time
+ @param hostName host name]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finished event of this task.
+ @param taskAttemptId task attempt id
+ @param shuffleFinished shuffle finish time
+ @param sortFinished sort finish time
+ @param finishTime finish time of task
+ @param hostName host name where task attempt executed]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log failed reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task failed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log killed reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when the task attempt was killed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+ a Reduce Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
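+ <!-- A minimal sketch (not part of the generated API record) of driving the
+      JobHistory.ReduceAttempt logging calls above; TaskAttemptID.forName(String)
+      and the identifier string are assumptions for illustration:
+
+        import org.apache.hadoop.mapred.JobHistory;
+        import org.apache.hadoop.mapred.TaskAttemptID;
+
+        public class ReduceAttemptLogSketch {
+          public static void main(String[] args) {
+            TaskAttemptID attempt =
+                TaskAttemptID.forName("attempt_200707121733_0003_r_000001_0");
+            String host = "tracker-host";
+            long start = System.currentTimeMillis();
+            JobHistory.ReduceAttempt.logStarted(attempt, start, host);
+            // ... shuffle, sort and reduce would run here ...
+            long shuffleDone = start + 1000L;   // illustrative timestamps
+            long sortDone = start + 2000L;
+            long finish = start + 3000L;
+            JobHistory.ReduceAttempt.logFinished(attempt, shuffleDone, sortDone,
+                                                 finish, host);
+          }
+        }
+ -->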
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Task -->
+ <class name="JobHistory.Task" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.Task"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="splitLocations" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of task (TIP).
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param startTime start time of the TIP
+ @param splitLocations split locations, applicable for map tasks]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finish time of task.
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param finishTime finish time of task in ms]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task failed event.
+ @param taskId task id
+ @param taskType MAP or REDUCE.
+ @param time timestamp when the task failure was detected.
+ @param error error message for failure.]]>
+ </doc>
+ </method>
+ <method name="getTaskAttempts" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.TaskAttempt&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all task attempts for this task, as a map from task attempt id to TaskAttempt.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to Task's start, finish or failure.
+ All events logged by this class are logged in a separate file per job in
+ job tracker history. These events map to TIPs in jobtracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Task -->
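+ <!-- A minimal sketch (illustrative only; TaskID.forName(String) and the
+      identifier string are assumptions) of the TIP-level logging calls above:
+
+        import org.apache.hadoop.mapred.Counters;
+        import org.apache.hadoop.mapred.JobHistory;
+        import org.apache.hadoop.mapred.TaskID;
+
+        public class TaskLogSketch {
+          public static void main(String[] args) {
+            TaskID tip = TaskID.forName("task_200707121733_0003_m_000000");
+            long start = System.currentTimeMillis();
+            // splitLocations: comma separated hosts, applicable for map tasks
+            JobHistory.Task.logStarted(tip, "MAP", start, "host1,host2");
+            JobHistory.Task.logFinished(tip, "MAP", start + 5000L, new Counters());
+          }
+        }
+ -->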
+ <!-- start class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <class name="JobHistory.TaskAttempt" extends="org.apache.hadoop.mapred.JobHistory.Task"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.TaskAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Base class for Map and Reduce TaskAttempts.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Values -->
+ <class name="JobHistory.Values" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Values&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Values[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Values"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[This enum contains some of the values commonly used by history log events.
+ Since values in history can only be strings, Values.name() is used in
+ most places in the history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Values -->
+ <!-- start class org.apache.hadoop.mapred.JobID -->
+ <class name="JobID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobID" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a JobID object
+ @param jtIdentifier jobTracker identifier
+ @param id job number]]>
+ </doc>
+ </constructor>
+ <method name="getJtIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare JobIDs first by jtIdentifier, then by job number]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a JobID object from a given string
+ @return constructed JobId object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getJobIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches job IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example to obtain a regex matching <i>any job</i>
+ run on the jobtracker started at <i>200707121733</i>, we would use:
+ <pre>
+ JobID.getJobIDsPattern("200707121733", null);
+ </pre>
+ which will return:
+ <pre> "job_200707121733_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @return a regex pattern matching JobIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[JobID represents the immutable and unique identifier for
+ a job. A JobID consists of two parts. The first part is the
+ jobtracker identifier, which defines the jobID-to-jobtracker mapping;
+ for a cluster setup this string is the jobtracker start time,
+ while for a local setting it is "local".
+ The second part is the job number. <br>
+ An example JobID is
+ <code>job_200707121733_0003</code>, which represents the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse JobID strings, but rather
+ use appropriate constructors or {@link #forName(String)} method.
+
+ @see TaskID
+ @see TaskAttemptID
+ @see JobTracker#getNewJobId()
+ @see JobTracker#getStartTime()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobID -->
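+ <!-- A minimal sketch of parsing and pattern-matching JobIDs with the methods
+      documented above (the identifier strings are illustrative):
+
+        import org.apache.hadoop.mapred.JobID;
+
+        public class JobIdSketch {
+          public static void main(String[] args) {
+            JobID id = JobID.forName("job_200707121733_0003");
+            // match any job of the jobtracker started at 200707121733
+            String pattern = JobID.getJobIDsPattern("200707121733", null);
+            System.out.println(id.toString().matches(pattern));   // prints true
+          }
+        }
+ -->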
+ <!-- start class org.apache.hadoop.mapred.JobPriority -->
+ <class name="JobPriority" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobPriority&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobPriority[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Used to describe the priority of the running job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobPriority -->
+ <!-- start class org.apache.hadoop.mapred.JobProfile -->
+ <class name="JobProfile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobProfile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an empty {@link JobProfile}.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, org.apache.hadoop.mapred.JobID, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a {@link JobProfile} from the given userid, jobid,
+ job config-file, job-details URL and job name.
+
+ @param user userid of the person who submitted the job.
+ @param jobid id of the job.
+ @param jobFile job configuration file.
+ @param url link to the web-ui for details of the job.
+ @param name user-specified job name.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="use JobProfile(String, JobID, String, String, String) instead">
+ <doc>
+ <![CDATA[@deprecated use JobProfile(String, JobID, String, String, String) instead]]>
+ </doc>
+ </constructor>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user id.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job id.]]>
+ </doc>
+ </method>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID() instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID() instead]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configuration file for the job.]]>
+ </doc>
+ </method>
+ <method name="getURL" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the link to the web-ui for details of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A JobProfile is a MapReduce primitive that tracks a job,
+ whether living or dead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobProfile -->
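+ <!-- A minimal sketch of the Writable round trip declared above; the profile
+      values are illustrative, and DataOutputBuffer / DataInputBuffer are the
+      standard org.apache.hadoop.io helper buffers:
+
+        import org.apache.hadoop.io.DataInputBuffer;
+        import org.apache.hadoop.io.DataOutputBuffer;
+        import org.apache.hadoop.mapred.JobID;
+        import org.apache.hadoop.mapred.JobProfile;
+
+        public class JobProfileSketch {
+          public static void main(String[] args) throws Exception {
+            JobProfile profile = new JobProfile("alice",
+                JobID.forName("job_200707121733_0003"),
+                "/jobs/job_200707121733_0003/job.xml",
+                "http://jobtracker:50030/", "word count");
+            DataOutputBuffer out = new DataOutputBuffer();
+            profile.write(out);                  // serialize
+            DataInputBuffer in = new DataInputBuffer();
+            in.reset(out.getData(), out.getLength());
+            JobProfile copy = new JobProfile();  // empty, then readFields()
+            copy.readFields(in);                 // deserialize
+            System.out.println(copy.getJobName());   // prints: word count
+          }
+        }
+ -->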
+ <!-- start class org.apache.hadoop.mapred.JobShell -->
+ <class name="JobShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run method from Tool]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Provides command line parsing for job submission.
+ A job submission looks like:
+ hadoop jar -libjars <comma separated jars> -archives <comma separated archives>
+ -files <comma separated files> inputjar args]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobShell -->
+ <!-- start class org.apache.hadoop.mapred.JobStatus -->
+ <class name="JobStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobStatus" type="java.lang.String, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job]]>
+ </doc>
+ </constructor>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID instead]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The jobid of the Job]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in maps]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in reduce]]>
+ </doc>
+ </method>
+ <method name="getRunState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return running state of the job]]>
+ </doc>
+ </method>
+ <method name="setRunState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Change the current run state of the job.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return start time of the job]]>
+ </doc>
+ </method>
+ <method name="getUsername" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the username of the job]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SUCCEEDED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PREP" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Describes the current status of a job. This is
+ not intended to be a comprehensive piece of data.
+ For that, look at JobProfile.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobStatus -->
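+ <!-- A minimal sketch of reading a JobStatus via the accessors above; the
+      JobTracker handle and job id are assumed to come from the caller:
+
+        import org.apache.hadoop.mapred.JobID;
+        import org.apache.hadoop.mapred.JobStatus;
+        import org.apache.hadoop.mapred.JobTracker;
+
+        public class JobStatusSketch {
+          static void report(JobTracker tracker, JobID jobid) {
+            JobStatus status = tracker.getJobStatus(jobid);
+            if (status.getRunState() == JobStatus.RUNNING) {
+              System.out.printf("maps %.0f%%, reduces %.0f%%%n",
+                                100 * status.mapProgress(),
+                                100 * status.reduceProgress());
+            }
+          }
+        }
+ -->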
+ <!-- start class org.apache.hadoop.mapred.JobTracker -->
+ <class name="JobTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.InterTrackerProtocol"/>
+ <implements name="org.apache.hadoop.mapred.JobSubmissionProtocol"/>
+ <method name="startTracker" return="org.apache.hadoop.mapred.JobTracker"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker with given configuration.
+
+ The conf will be modified to reflect the actual ports on which
+ the JobTracker is up and running if the user passes the port as
+ <code>zero</code>.
+
+ @param conf configuration for the JobTracker.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Run forever]]>
+ </doc>
+ </method>
+ <method name="getTotalSubmissions" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobTrackerMachine" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTrackerIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the unique identifier (i.e. timestamp) of this job tracker start.
+ @return a string with a unique identifier]]>
+ </doc>
+ </method>
+ <method name="getTrackerPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="runningJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRunningJobs" return="java.util.List&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version that is called from a timer thread, and therefore needs to be
+ careful to synchronize.]]>
+ </doc>
+ </method>
+ <method name="failedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="completedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="taskTrackers" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskTracker" return="org.apache.hadoop.mapred.TaskTrackerStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trackerID" type="java.lang.String"/>
+ </method>
+ <method name="resolveAndAddToTopology" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getNodesAtMaxLevel" return="java.util.Collection&lt;org.apache.hadoop.net.Node&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a collection of nodes at the max level]]>
+ </doc>
+ </method>
+ <method name="getParentNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <param name="level" type="int"/>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the Node in the network topology that corresponds to the hostname]]>
+ </doc>
+ </method>
+ <method name="getNumTaskCacheLevels" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumResolvedTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="heartbeat" return="org.apache.hadoop.mapred.HeartbeatResponse"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskTrackerStatus"/>
+ <param name="initialContact" type="boolean"/>
+ <param name="acceptNewTasks" type="boolean"/>
+ <param name="responseId" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The periodic heartbeat mechanism between the {@link TaskTracker} and
+ the {@link JobTracker}.
+
+ The {@link JobTracker} processes the status information sent by the
+ {@link TaskTracker} and responds with instructions to start/stop
+ tasks or jobs, and also 'reset' instructions during contingencies.]]>
+ </doc>
+ </method>
+ <method name="getFilesystemName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab the local fs name]]>
+ </doc>
+ </method>
+ <method name="reportTaskTrackerError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTracker" type="java.lang.String"/>
+ <param name="errorClass" type="java.lang.String"/>
+ <param name="errorMessage" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNewJobId" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Allocates a new JobID.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[JobTracker.submitJob() kicks off a new job.
+
+ Create a 'JobInProgress' object, which contains both JobProfile
+ and JobStatus. Those two sub-objects are sometimes shipped outside
+ of the JobTracker. But JobInProgress adds info that's useful for
+ the JobTracker alone.
+
+ We add the JIP to the jobInitQueue, which is processed
+ asynchronously to handle split-computation and build up
+ the right TaskTracker/Block mapping.]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="fromid" type="int"/>
+ <param name="maxevents" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxEvents" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="tipid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the diagnostics for a given task
+ @param taskId the id of the task
+ @return an array of the diagnostic messages]]>
+ </doc>
+ </method>
+ <method name="getTip" return="org.apache.hadoop.mapred.TaskInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tipid" type="org.apache.hadoop.mapred.TaskID"/>
+ <doc>
+ <![CDATA[Returns specified TaskInProgress, or null.]]>
+ </doc>
+ </method>
+ <method name="killTask" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="killTask" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a Task to be killed]]>
+ </doc>
+ </method>
+ <method name="getAssignedTracker" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ </method>
+ <method name="getAssignedTracker" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Get tracker name for a given task id.
+ @param taskId the name of the task
+ @return The name of the task tracker]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSystemDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir()]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.JobInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.JobInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Get the localized job file path on the job tracker's local file system
+ @param jobId id of the job
+ @return the path of the job conf file on the local file system]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker process. This is used only for debugging. As a rule,
+ JobTracker should be run as part of the DFS Namenode process.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[JobTracker is the central location for submitting and
+ tracking MR jobs in a network environment.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker -->
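+ <!-- A minimal sketch, mirroring what main() above is described as doing, of
+      embedding a JobTracker for debugging (configuration comes from the usual
+      Hadoop config files):
+
+        import org.apache.hadoop.mapred.JobConf;
+        import org.apache.hadoop.mapred.JobTracker;
+
+        public class JobTrackerSketch {
+          public static void main(String[] args) throws Exception {
+            JobConf conf = new JobConf();
+            // if a configured port is zero, conf is updated with the real port
+            JobTracker tracker = JobTracker.startTracker(conf);
+            tracker.offerService();   // runs forever
+          }
+        }
+ -->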
+ <!-- start class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <class name="JobTracker.IllegalStateException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobTracker.IllegalStateException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A client tried to submit a job before the Job Tracker was ready.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <!-- start class org.apache.hadoop.mapred.JobTracker.State -->
+ <class name="JobTracker.State" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobTracker.State&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobTracker.State[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.State -->
+ <!-- start class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+ <class name="KeyValueLineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="findSeparator" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="sep" type="byte"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read key/value pair in a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class treats a line in the input as a key/value pair separated by a
+ separator character. The separator can be specified in the config file
+ under the attribute name key.value.separator.in.input.line. The default
+ separator is the tab character ('\t').]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
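+ <!-- A minimal sketch of reading key/value lines with a custom separator; the
+      FileSplit is assumed to be supplied by the surrounding input format:
+
+        import java.io.IOException;
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.io.Text;
+        import org.apache.hadoop.mapred.FileSplit;
+        import org.apache.hadoop.mapred.KeyValueLineRecordReader;
+
+        public class KeyValueDumpSketch {
+          static void dump(FileSplit split) throws IOException {
+            Configuration conf = new Configuration();
+            conf.set("key.value.separator.in.input.line", ",");  // default: '\t'
+            KeyValueLineRecordReader reader =
+                new KeyValueLineRecordReader(conf, split);
+            Text key = reader.createKey();
+            Text value = reader.createValue();
+            while (reader.next(key, value)) {
+              System.out.println(key + " => " + value);
+            }
+            reader.close();
+          }
+        }
+ -->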
+ <!-- start class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
+ <class name="KeyValueTextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="KeyValueTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return is used to signal end of line. Each line
+ is divided into key and value parts by a separator byte. If no such byte
+ exists, the key will be the entire line and the value will be empty.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
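+ <!-- A minimal sketch of wiring this input format into a job; setInputFormat
+      and set(String, String) are standard JobConf calls, and the separator
+      value is illustrative:
+
+        import org.apache.hadoop.mapred.JobConf;
+        import org.apache.hadoop.mapred.KeyValueTextInputFormat;
+
+        public class KeyValueJobSketch {
+          public static void main(String[] args) {
+            JobConf job = new JobConf();
+            job.setInputFormat(KeyValueTextInputFormat.class);
+            job.set("key.value.separator.in.input.line", "=");  // key=value lines
+          }
+        }
+ -->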
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader -->
+ <class name="LineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="LineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.LongWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.LongWritable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the progress within the split]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Treats keys as offsets in the file and values as lines.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
+ <class name="LineRecordReader.LineReader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LineRecordReader.LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ <code>io.file.buffer.size</code> specified in the given
+ <code>Configuration</code>.
+ @param in input stream
+ @param conf configuration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the underlying stream.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <param name="maxBytesToConsume" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @param maxLineLength the maximum number of bytes to store into str.
+ @param maxBytesToConsume the maximum number of bytes to consume in this call.
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @param maxLineLength the maximum number of bytes to store into str.
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides a line reader from an input stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
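+ <!-- A minimal usage sketch for LineRecordReader.LineReader as documented above,
+ assuming a local file named "input.txt" (illustrative) and a default
+ Configuration:
+
+ import java.io.FileInputStream;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.LineRecordReader;
+
+ public class LineReaderSketch {
+   public static void main(String[] args) throws Exception {
+     LineRecordReader.LineReader reader = new LineRecordReader.LineReader(
+         new FileInputStream("input.txt"), new Configuration());
+     Text line = new Text();
+     // readLine returns the bytes consumed, including the newline; 0 means EOF.
+     while (reader.readLine(line) > 0) {
+       System.out.println(line);
+     }
+     reader.close();
+   }
+ }
+ -->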
+ <!-- start class org.apache.hadoop.mapred.MapFileOutputFormat -->
+ <class name="MapFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.MapFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getEntry" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readers" type="org.apache.hadoop.io.MapFile.Reader[]"/>
+ <param name="partitioner" type="org.apache.hadoop.mapred.Partitioner&lt;K, V&gt;"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get an entry from output generated by this class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link MapFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapFileOutputFormat -->
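+ <!-- A hedged sketch of looking up one entry in MapFileOutputFormat output via the
+ getReaders/getEntry helpers above; it assumes the job partitioned with
+ org.apache.hadoop.mapred.lib.HashPartitioner and used Text keys and values, and
+ "outDir"/"someKey" are illustrative:
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.io.MapFile;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.MapFileOutputFormat;
+ import org.apache.hadoop.mapred.lib.HashPartitioner;
+
+ public class MapFileLookupSketch {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     FileSystem fs = FileSystem.get(conf);
+     // One MapFile.Reader per reduce partition of the job output.
+     MapFile.Reader[] readers =
+         MapFileOutputFormat.getReaders(fs, new Path("outDir"), conf);
+     // getEntry routes the key through the partitioner to the right reader.
+     Text value = new Text();
+     MapFileOutputFormat.getEntry(
+         readers, new HashPartitioner<Text, Text>(), new Text("someKey"), value);
+     System.out.println(value);
+   }
+ }
+ -->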
+ <!-- start interface org.apache.hadoop.mapred.Mapper -->
+ <interface name="Mapper" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1"/>
+ <param name="value" type="V1"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Maps a single input key/value pair into an intermediate key/value pair.
+
+ <p>Output pairs need not be of the same types as input pairs. A given
+ input pair may map to zero or many output pairs. Output pairs are
+ collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes a significant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has
+ timed-out and kill that task. The other way of avoiding this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the input key.
+ @param value the input value.
+ @param output collects mapped keys and values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Maps input key/value pairs to a set of intermediate key/value pairs.
+
+ <p>Maps are the individual tasks which transform input records into
+ intermediate records. The transformed intermediate records need not be of
+ the same type as the input records. A given input pair may map to zero or
+ many output pairs.</p>
+
+ <p>The Hadoop Map-Reduce framework spawns one map task for each
+ {@link InputSplit} generated by the {@link InputFormat} for the job.
+ <code>Mapper</code> implementations can access the {@link JobConf} for the
+ job via the {@link JobConfigurable#configure(JobConf)} method and initialize
+ themselves. Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p>The framework then calls
+ {@link #map(Object, Object, OutputCollector, Reporter)}
+ for each key/value pair in the <code>InputSplit</code> for that task.</p>
+
+ <p>All intermediate values associated with a given output key are
+ subsequently grouped by the framework, and passed to a {@link Reducer} to
+ determine the final output. Users can control the grouping by specifying
+ a <code>Comparator</code> via
+ {@link JobConf#setOutputKeyComparatorClass(Class)}.</p>
+
+ <p>The grouped <code>Mapper</code> outputs are partitioned per
+ <code>Reducer</code>. Users can control which keys (and hence records) go to
+ which <code>Reducer</code> by implementing a custom {@link Partitioner}.</p>
+
+ <p>Users can optionally specify a <code>combiner</code>, via
+ {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the
+ intermediate outputs, which helps to cut down the amount of data transferred
+ from the <code>Mapper</code> to the <code>Reducer</code>.</p>
+
+ <p>The intermediate, grouped outputs are always stored in
+ {@link SequenceFile}s. Applications can specify if and how the intermediate
+ outputs are to be compressed and which {@link CompressionCodec}s are to be
+ used via the <code>JobConf</code>.</p>
+
+ <p>If the job has
+ <a href="{@docRoot}/org/apache/hadoop/mapred/JobConf.html#ReducerNone">zero
+ reduces</a> then the output of the <code>Mapper</code> is directly written
+ to the {@link FileSystem} without grouping by keys.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyMapper&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Mapper&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String mapTaskId;
+ private String inputFile;
+ private int noRecords = 0;
+
+ public void configure(JobConf job) {
+ mapTaskId = job.get("mapred.task.id");
+ inputFile = job.get("mapred.input.file");
+ }
+
+ public void map(K key, V val,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ reporter.progress();
+
+ // Process some more
+ // ...
+ // ...
+
+ // Increment the no. of &lt;key, value&gt; pairs processed
+ ++noRecords;
+
+ // Increment counters
+ reporter.incrCounter(MyCounters.NUM_RECORDS, 1);
+
+ // Every 100 records update application-level status
+ if ((noRecords%100) == 0) {
+ reporter.setStatus(mapTaskId + " processed " + noRecords +
+ " from input-file: " + inputFile);
+ }
+
+ // Output the result
+ output.collect(key, val);
+ }
+ }
+ </pre></blockquote></p>
+
+ <p>Applications may write a custom {@link MapRunnable} to exert greater
+ control on map processing e.g. multi-threaded <code>Mapper</code>s etc.</p>
+
+ @see JobConf
+ @see InputFormat
+ @see Partitioner
+ @see Reducer
+ @see MapReduceBase
+ @see MapRunnable
+ @see SequenceFile]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Mapper -->
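+ <!-- A short driver sketch wiring a Mapper such as the MyMapper example above
+ into a job; it assumes Text keys/values, and the paths "in" and "out" are
+ illustrative:
+
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.FileInputFormat;
+ import org.apache.hadoop.mapred.FileOutputFormat;
+ import org.apache.hadoop.mapred.JobClient;
+ import org.apache.hadoop.mapred.JobConf;
+
+ public class MyMapperDriver {
+   public static void main(String[] args) throws Exception {
+     JobConf job = new JobConf(MyMapperDriver.class);
+     job.setJobName("my-mapper-example");
+     job.setMapperClass(MyMapper.class);
+     // With zero reduces the map output is written directly to the
+     // FileSystem, as described in the Mapper documentation above.
+     job.setNumReduceTasks(0);
+     job.setOutputKeyClass(Text.class);
+     job.setOutputValueClass(Text.class);
+     FileInputFormat.setInputPaths(job, new Path("in"));
+     FileOutputFormat.setOutputPath(job, new Path("out"));
+     JobClient.runJob(job);
+   }
+ }
+ -->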
+ <!-- start class org.apache.hadoop.mapred.MapReduceBase -->
+ <class name="MapReduceBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="MapReduceBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for {@link Mapper} and {@link Reducer} implementations.
+
+ <p>Provides default no-op implementations for a few methods; most non-trivial
+ applications need to override some of them.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapReduceBase -->
+ <!-- start interface org.apache.hadoop.mapred.MapRunnable -->
+ <interface name="MapRunnable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start mapping input <tt>&lt;key, value&gt;</tt> pairs.
+
+ <p>Mapping of input records to output records is complete when this method
+ returns.</p>
+
+ @param input the {@link RecordReader} to read the input records.
+ @param output the {@link OutputCollector} to collect the output records.
+ @param reporter {@link Reporter} to report progress, status-updates etc.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Expert: Generic interface for {@link Mapper}s.
+
+ <p>Custom implementations of <code>MapRunnable</code> can exert greater
+ control on map processing e.g. multi-threaded, asynchronous mappers etc.</p>
+
+ @see Mapper]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.MapRunnable -->
+ <!-- start class org.apache.hadoop.mapred.MapRunner -->
+ <class name="MapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Default {@link MapRunnable} implementation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapRunner -->
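+ <!-- A hedged sketch of a custom MapRunnable that drives the record reader by
+ hand, following the run() contract above; the pass-through types are
+ illustrative:
+
+ import java.io.IOException;
+ import org.apache.hadoop.io.LongWritable;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.MapRunnable;
+ import org.apache.hadoop.mapred.OutputCollector;
+ import org.apache.hadoop.mapred.RecordReader;
+ import org.apache.hadoop.mapred.Reporter;
+
+ public class PassThroughRunner
+     implements MapRunnable<LongWritable, Text, LongWritable, Text> {
+
+   public void configure(JobConf job) { }
+
+   public void run(RecordReader<LongWritable, Text> input,
+                   OutputCollector<LongWritable, Text> output,
+                   Reporter reporter) throws IOException {
+     LongWritable key = input.createKey();
+     Text value = input.createValue();
+     // Mapping is complete when this method returns, per the contract above.
+     while (input.next(key, value)) {
+       output.collect(key, value);
+       reporter.progress(); // keep the task alive
+     }
+   }
+ }
+ -->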
+ <!-- start class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <class name="MultiFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultiFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An abstract {@link InputFormat} that returns {@link MultiFileSplit}s
+ from its {@link #getSplits(JobConf, int)} method. Splits are constructed from
+ the files under the input paths. Each returned split contains <i>nearly</i>
+ equal total content length. <br>
+ Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)}
+ to construct <code>RecordReader</code>s for <code>MultiFileSplit</code>s.
+ @see MultiFileSplit]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.MultiFileSplit -->
+ <class name="MultiFileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="MultiFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLengths" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array containing the lengths of the files in
+ the split.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the length of the i<sup>th</sup> Path.]]>
+ </doc>
+ </method>
+ <method name="getNumPaths" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of Paths in the split.]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the i<sup>th</sup> Path.]]>
+ </doc>
+ </method>
+ <method name="getPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all the Paths in the split.]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A sub-collection of input files. Unlike {@link FileSplit}, the
+ MultiFileSplit class does not represent a split of a single file, but a split
+ of the input files into smaller sets. The atomic unit of splitting is a file. <br>
+ MultiFileSplit can be used to implement {@link RecordReader}s that read
+ one record per file.
+ @see FileSplit
+ @see MultiFileInputFormat]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileSplit -->
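+ <!-- A hedged sketch of the pattern described above: a MultiFileInputFormat
+ subclass whose RecordReader emits one (path, length) record per file in a
+ MultiFileSplit; the class name is illustrative:
+
+ import java.io.IOException;
+ import org.apache.hadoop.io.LongWritable;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.InputSplit;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.MultiFileInputFormat;
+ import org.apache.hadoop.mapred.MultiFileSplit;
+ import org.apache.hadoop.mapred.RecordReader;
+ import org.apache.hadoop.mapred.Reporter;
+
+ public class PathPerFileInputFormat extends MultiFileInputFormat<Text, LongWritable> {
+   public RecordReader<Text, LongWritable> getRecordReader(
+       InputSplit split, JobConf job, Reporter reporter) throws IOException {
+     final MultiFileSplit mfs = (MultiFileSplit) split;
+     return new RecordReader<Text, LongWritable>() {
+       private int i = 0;
+       public boolean next(Text key, LongWritable value) {
+         if (i >= mfs.getNumPaths()) return false;
+         key.set(mfs.getPath(i).toString()); // one record per file
+         value.set(mfs.getLength(i));
+         i++;
+         return true;
+       }
+       public Text createKey() { return new Text(); }
+       public LongWritable createValue() { return new LongWritable(); }
+       public long getPos() { return i; }
+       public void close() { }
+       public float getProgress() {
+         return mfs.getNumPaths() == 0 ? 1.0f : (float) i / mfs.getNumPaths();
+       }
+     };
+   }
+ }
+ -->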
+ <!-- start interface org.apache.hadoop.mapred.OutputCollector -->
+ <interface name="OutputCollector" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="collect"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Adds a key/value pair to the output.
+
+ @param key the key to collect.
+ @param value the value to collect.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Collects the <code>&lt;key, value&gt;</code> pairs output by {@link Mapper}s
+ and {@link Reducer}s.
+
+ <p><code>OutputCollector</code> is the generalization of the facility
+ provided by the Map-Reduce framework to collect data output by either the
+ <code>Mapper</code> or the <code>Reducer</code> i.e. intermediate outputs
+ or the output of the job.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputCollector -->
+ <!-- start interface org.apache.hadoop.mapred.OutputFormat -->
+ <interface name="OutputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordWriter} for the given job.
+
+ @param ignored
+ @param job configuration for the job whose output is being written.
+ @param name the unique name for this part of the output.
+ @param progress mechanism for reporting progress while writing to file.
+ @return a {@link RecordWriter} to write the output for the job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check for validity of the output-specification for the job.
+
+ <p>This is to validate the output-specification for the job when it
+ is submitted. Typically this checks that the output does not already
+ exist, throwing an exception when it does, so that output is not
+ overwritten.</p>
+
+ @param ignored
+ @param job job configuration.
+ @throws IOException when output should not be attempted]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputFormat</code> describes the output-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the output-specification of the job, e.g. check that the
+ output directory doesn't already exist.
+ </li>
+ <li>
+ Provide the {@link RecordWriter} implementation to be used to write out
+ the output files of the job. Output files are stored in a
+ {@link FileSystem}.
+ </li>
+ </ol>
+
+ @see RecordWriter
+ @see JobConf]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.OutputFormatBase -->
+ <class name="OutputFormatBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat}">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="OutputFormatBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for {@link OutputFormat}.
+ @deprecated Use {@link FileOutputFormat}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputFormatBase -->
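+ <!-- OutputFormatBase is deprecated in favor of FileOutputFormat, so a hedged
+ sketch of the compression knobs above uses the FileOutputFormat equivalents
+ (assumed to carry the same static helpers):
+
+ import org.apache.hadoop.io.compress.GzipCodec;
+ import org.apache.hadoop.mapred.FileOutputFormat;
+ import org.apache.hadoop.mapred.JobConf;
+
+ public class CompressedOutputSketch {
+   public static void main(String[] args) {
+     JobConf job = new JobConf();
+     // Ask for compressed job output and choose the codec.
+     FileOutputFormat.setCompressOutput(job, true);
+     FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
+   }
+ }
+ -->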
+ <!-- start class org.apache.hadoop.mapred.OutputLogFilter -->
+ <class name="OutputLogFilter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.PathFilter"/>
+ <constructor name="OutputLogFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <doc>
+ <![CDATA[This class filters log files from the given directory.
+ It does not accept paths containing <code>_logs</code>.
+ It can be used to list the paths of an output directory as follows:
+ Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
+ new OutputLogFilter()));]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputLogFilter -->
+ <!-- start interface org.apache.hadoop.mapred.Partitioner -->
+ <interface name="Partitioner" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numPartitions" type="int"/>
+ <doc>
+ <![CDATA[Get the partition number for a given key (hence record) given the total
+ number of partitions, i.e. the number of reduce-tasks for the job.
+
+ <p>Typically a hash function on all or a subset of the key.</p>
+
+ @param key the key to be partitioned.
+ @param value the entry value.
+ @param numPartitions the total number of partitions.
+ @return the partition number for the <code>key</code>.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partitions the key space.
+
+ <p><code>Partitioner</code> controls the partitioning of the keys of the
+ intermediate map-outputs. The key (or a subset of the key) is used to derive
+ the partition, typically by a hash function. The total number of partitions
+ is the same as the number of reduce tasks for the job. Hence this controls
+ which of the <code>m</code> reduce tasks the intermediate key (and hence the
+ record) is sent for reduction.</p>
+
+ @see Reducer]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Partitioner -->
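+ <!-- A minimal Partitioner sketch following the doc above (a hash over the key
+ mapped into [0, numPartitions)); the Text key/value types are illustrative:
+
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.Partitioner;
+
+ public class KeyHashPartitioner implements Partitioner<Text, Text> {
+   public void configure(JobConf job) { }
+
+   public int getPartition(Text key, Text value, int numPartitions) {
+     // Mask off the sign bit so the modulus is always non-negative.
+     return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
+   }
+ }
+ -->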
+ <!-- start interface org.apache.hadoop.mapred.RecordReader -->
+ <interface name="RecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the next key/value pair from the input for processing.
+
+ @param key the key to read data into
+ @param value the value to read data into
+ @return true iff a key/value was read, false if at EOF]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a key.
+
+ @return a new key object.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a value.
+
+ @return a new value object.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current position in the input.
+
+ @return the current position in the input.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this {@link RecordReader} to future operations.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[How much of the input has the {@link RecordReader} consumed, i.e.
+ how much of the split has been processed?
+
+ @return progress from <code>0.0</code> to <code>1.0</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordReader</code> reads &lt;key, value&gt; pairs from an
+ {@link InputSplit}.
+
+ <p><code>RecordReader</code>, typically, converts the byte-oriented view of
+ the input, provided by the <code>InputSplit</code>, and presents a
+ record-oriented view for the {@link Mapper} & {@link Reducer} tasks for
+ processing. It thus assumes the responsibility of processing record
+ boundaries and presenting the tasks with keys and values.</p>
+
+ @see InputSplit
+ @see InputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordReader -->
+ <!-- start interface org.apache.hadoop.mapred.RecordWriter -->
+ <interface name="RecordWriter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes a key/value pair.
+
+ @param key the key to write.
+ @param value the value to write.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this <code>RecordWriter</code> to future operations.
+
+ @param reporter facility to report progress.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordWriter</code> writes the output &lt;key, value&gt; pairs
+ to an output file.
+
+ <p><code>RecordWriter</code> implementations write the job outputs to the
+ {@link FileSystem}.
+
+ @see OutputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordWriter -->
+ <!-- start interface org.apache.hadoop.mapred.Reducer -->
+ <interface name="Reducer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="values" type="java.util.Iterator&lt;V2&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K3, V3&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<i>Reduces</i> values for a given key.
+
+ <p>The framework calls this method for each
+ <code>&lt;key, (list of values)></code> pair in the grouped inputs.
+ Output values must be of the same type as input values. Input keys must
+ not be altered. The framework will <b>reuse</b> the key and value objects
+ that are passed into the reduce, therefore the application should clone
+ the objects they want to keep a copy of. In many cases, all values are
+ combined into zero or one value.
+ </p>
+
+ <p>Output pairs are collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes a significant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has
+ timed-out and kill that task. The other way of avoiding this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the key.
+ @param values the list of values to reduce.
+ @param output to collect keys and combined values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reduces a set of intermediate values which share a key to a smaller set of
+ values.
+
+ <p>The number of <code>Reducer</code>s for the job is set by the user via
+ {@link JobConf#setNumReduceTasks(int)}. <code>Reducer</code> implementations
+ can access the {@link JobConf} for the job via the
+ {@link JobConfigurable#configure(JobConf)} method and initialize themselves.
+ Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p><code>Reducer</code> has 3 primary phases:</p>
+ <ol>
+ <li>
+
+ <h4 id="Shuffle">Shuffle</h4>
+
+ <p><code>Reducer</code> is given as input the grouped output of a {@link Mapper}.
+ In this phase the framework, for each <code>Reducer</code>, fetches the
+ relevant partition of the output of all the <code>Mapper</code>s, via HTTP.
+ </p>
+ </li>
+
+ <li>
+ <h4 id="Sort">Sort</h4>
+
+ <p>The framework groups <code>Reducer</code> inputs by <code>key</code>s
+ (since different <code>Mapper</code>s may have output the same key) in this
+ stage.</p>
+
+ <p>The shuffle and sort phases occur simultaneously, i.e. while outputs are
+ being fetched they are merged.</p>
+
+ <h5 id="SecondarySort">SecondarySort</h5>
+
+ <p>If equivalence rules for keys while grouping the intermediates are
+ different from those for grouping keys before reduction, then one may
+ specify a <code>Comparator</code> via
+ {@link JobConf#setOutputValueGroupingComparator(Class)}. Since
+ {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to
+ control how intermediate keys are grouped, these can be used in conjunction
+ to simulate <i>secondary sort on values</i>.</p>
+
+
+ For example, say that you want to find duplicate web pages and tag them
+ all with the url of the "best" known example. You would set up the job
+ like:
+ <ul>
+ <li>Map Input Key: url</li>
+ <li>Map Input Value: document</li>
+ <li>Map Output Key: document checksum, url pagerank</li>
+ <li>Map Output Value: url</li>
+ <li>Partitioner: by checksum</li>
+ <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
+ <li>OutputValueGroupingComparator: by checksum</li>
+ </ul>
+ </li>
+
+ <li>
+ <h4 id="Reduce">Reduce</h4>
+
+ <p>In this phase the
+ {@link #reduce(Object, Iterator, OutputCollector, Reporter)}
+ method is called for each <code>&lt;key, (list of values)></code> pair in
+ the grouped inputs.</p>
+ <p>The output of the reduce task is typically written to the
+ {@link FileSystem} via
+ {@link OutputCollector#collect(Object, Object)}.</p>
+ </li>
+ </ol>
+
+ <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyReducer&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Reducer&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String reduceTaskId;
+ private int noKeys = 0;
+
+ public void configure(JobConf job) {
+ reduceTaskId = job.get("mapred.task.id");
+ }
+
+ public void reduce(K key, Iterator&lt;V&gt; values,
+ OutputCollector&lt;K, V&gt; output,
+ Reporter reporter)
+ throws IOException {
+
+ // Process
+ int noValues = 0;
+ while (values.hasNext()) {
+ V value = values.next();
+
+ // Increment the no. of values for this key
+ ++noValues;
+
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ if ((noValues%10) == 0) {
+ reporter.progress();
+ }
+
+ // Process some more
+ // ...
+ // ...
+
+ // Output the &lt;key, value&gt;
+ output.collect(key, value);
+ }
+
+ // Increment the no. of &lt;key, list of values&gt; pairs processed
+ ++noKeys;
+
+ // Increment counters
+ reporter.incrCounter(MyCounters.NUM_RECORDS, 1);
+
+ // Every 100 keys update application-level status
+ if ((noKeys%100) == 0) {
+ reporter.setStatus(reduceTaskId + " processed " + noKeys);
+ }
+ }
+ }
+ </pre></blockquote></p>
+
+ @see Mapper
+ @see Partitioner
+ @see Reporter
+ @see MapReduceBase]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reducer -->
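+ <!-- A hedged configuration sketch for the secondary-sort recipe above; the three
+ classes named here are hypothetical placeholders for the recipe's partitioner
+ and comparators:
+
+ JobConf job = new JobConf();
+ // Partition by checksum so all candidate urls for a page meet in one reduce.
+ job.setPartitionerClass(ChecksumPartitioner.class);
+ // Sort by checksum, then by decreasing pagerank, so the "best" url comes first.
+ job.setOutputKeyComparatorClass(ChecksumThenPagerankComparator.class);
+ // Group only by checksum, so one reduce() call sees all urls for a page.
+ job.setOutputValueGroupingComparator(ChecksumComparator.class);
+ -->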
+ <!-- start interface org.apache.hadoop.mapred.Reporter -->
+ <interface name="Reporter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Progressable"/>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the status description for the task.
+
+ @param status brief description of the current status.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the key, which can be of
+ any {@link Enum} type, by the specified amount.
+
+ @param key key to identify the counter to be incremented. The key can
+ be any <code>Enum</code>.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the group and counter name
+ by the specified amount.
+
+ @param group name to identify the group of the counter to be incremented.
+ @param counter name to identify the counter within the group.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="getInputSplit" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+ <doc>
+ <![CDATA[Get the {@link InputSplit} object for a map.
+
+ @return the <code>InputSplit</code> that the map is reading from.
+ @throws UnsupportedOperationException if called outside a mapper]]>
+ </doc>
+ </method>
+ <field name="NULL" type="org.apache.hadoop.mapred.Reporter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A constant of Reporter type that does nothing.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A facility for Map-Reduce applications to report progress and update
+ counters, status information etc.
+
+ <p>{@link Mapper} and {@link Reducer} can use the <code>Reporter</code>
+ provided to report progress or just indicate that they are alive. In
+ scenarios where the application takes a significant amount of time to
+ process individual key/value pairs, this is crucial since the framework
+ might assume that the task has timed-out and kill that task.</p>
+
+ <p>Applications can also update {@link Counters} via the provided
+ <code>Reporter</code>.</p>
+
+ @see Progressable
+ @see Counters]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reporter -->
+ <!-- start interface org.apache.hadoop.mapred.RunningJob -->
+ <interface name="RunningJob" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job identifier.
+
+ @return the job identifier.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.">
+ <doc>
+ <![CDATA[@deprecated This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the job.
+
+ @return the name of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the path of the submitted job configuration.
+
+ @return the path of the submitted job configuration.]]>
+ </doc>
+ </method>
+ <method name="getTrackingURL" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the URL where some job progress information will be displayed.
+
+ @return the URL where some job progress information will be displayed.]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's map-tasks, as a float between 0.0
+ and 1.0. When all map tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's map-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0
+ and 1.0. When all reduce tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's reduce-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isComplete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job is finished or not.
+ This is a non-blocking call.
+
+ @return <code>true</code> if the job is complete, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isSuccessful" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job completed successfully.
+
+ @return <code>true</code> if the job succeeded, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="waitForCompletion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Blocks until the job is complete.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill the running job. Blocks until all job tasks have been
+ killed as well. If the job is no longer running, it simply returns.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="startFrom" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get events indicating completion (success/failure) of component tasks.
+
+ @param startFrom index to start fetching events from
+ @return an array of {@link TaskCompletionEvent}s
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill indicated task attempt.
+
+ @param taskId the id of the task to be terminated.
+ @param shouldFail if true the task is failed and added to the failed-tasks
+ list; otherwise it is just killed, without affecting
+ the job's failure status.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #killTask(TaskAttemptID, boolean)}">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the counters for this job.
+
+ @return the counters for this job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RunningJob</code> is the user-interface to query for details on a
+ running Map-Reduce job.
+
+ <p>Clients can get hold of <code>RunningJob</code> via the {@link JobClient}
+ and then query the running-job for details such as name, configuration,
+ progress etc.</p>
+
+ @see JobClient]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RunningJob -->
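+ <!-- A hedged sketch of polling a RunningJob via JobClient; the five-second poll
+ interval is arbitrary:
+
+ import org.apache.hadoop.mapred.JobClient;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.RunningJob;
+
+ public class JobPollSketch {
+   public static void main(String[] args) throws Exception {
+     JobConf conf = new JobConf();
+     JobClient client = new JobClient(conf);
+     RunningJob job = client.submitJob(conf);
+     // isComplete() is non-blocking, so we can poll progress ourselves
+     // instead of calling waitForCompletion().
+     while (!job.isComplete()) {
+       System.out.printf("map %.0f%% reduce %.0f%%%n",
+           job.mapProgress() * 100, job.reduceProgress() * 100);
+       Thread.sleep(5000);
+     }
+     System.out.println(job.isSuccessful() ? "succeeded" : "failed");
+   }
+ }
+ -->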
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <class name="SequenceFileAsBinaryInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat that reads keys and values from SequenceFiles in
+ binary (raw) format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
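+ <!-- A tiny configuration sketch for the format above, handing maps raw
+ (BytesWritable, BytesWritable) pairs from SequenceFiles:
+
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat;
+
+ public class BinaryInputSketch {
+   public static void main(String[] args) {
+     JobConf job = new JobConf();
+     // Maps receive raw (BytesWritable, BytesWritable) pairs.
+     job.setInputFormat(SequenceFileAsBinaryInputFormat.class);
+   }
+ }
+ -->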
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <class name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"/>
+ <constructor name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the key class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the value class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.BytesWritable"/>
+ <param name="val" type="org.apache.hadoop.io.BytesWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read raw bytes from a SequenceFile.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split.
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Read records from a SequenceFile as binary (raw) bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
+ <class name="SequenceFileAsBinaryOutputFormat" extends="org.apache.hadoop.mapred.SequenceFileOutputFormat&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setSequenceFileOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the {@link SequenceFile}.
+ <p>This allows the user to specify the key class to be different
+ from the actual class ({@link BytesWritable}) used for writing.</p>
+
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output key class.]]>
+ </doc>
+ </method>
+ <method name="setSequenceFileOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for the {@link SequenceFile}
+ <p>This allows the user to specify the value class to be different
+ from the actual class ({@link BytesWritable}) used for writing </p>
+
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output value class.]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the key class for the {@link SequenceFile}
+
+ @return the key class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the value class for the {@link SequenceFile}
+
+ @return the value class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes keys/values to
+ {@link SequenceFile}s in binary (raw) format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
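+ <!-- A minimal configuration sketch for the class above: the job itself writes
+      BytesWritable pairs, while the declared key/value classes are recorded in the
+      SequenceFile header for readers. The JobConf instance and the
+      Text/LongWritable choices are hypothetical.
+
+      JobConf conf = new JobConf();
+      conf.setOutputFormat(SequenceFileAsBinaryOutputFormat.class);
+      SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(conf, Text.class);
+      SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(conf, LongWritable.class);
+ -->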
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <class name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" type="org.apache.hadoop.io.BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.BytesWritable"/>
+ </method>
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Inner class used for appendRaw]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+ <class name="SequenceFileAsTextInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is similar to SequenceFileInputFormat, except that it generates a SequenceFileAsTextRecordReader,
+ which converts the input keys and values to their String forms by calling the toString() method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
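+ <!-- A minimal sketch selecting the input format above, so map tasks receive
+      Text/Text pairs produced via toString(). The JobConf instance is hypothetical.
+
+      JobConf conf = new JobConf();
+      conf.setInputFormat(SequenceFileAsTextInputFormat.class);
+      // the mapper would then be declared as Mapper<Text, Text, K2, V2>
+ -->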
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <class name="SequenceFileAsTextRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="SequenceFileAsTextRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read key/value pair in a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class converts the input keys and values to their String forms by calling the toString()
+ method. This class is to SequenceFileAsTextInputFormat what LineRecordReader
+ is to TextInputFormat.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+ <class name="SequenceFileInputFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a record reader for the given split
+ @param split file split
+ @param job job configuration
+ @param reporter reporter that sends reports to the task tracker
+ @return RecordReader]]>
+ </doc>
+ </method>
+ <method name="setFilterClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="filterClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[set the filter class
+
+ @param conf application configuration
+ @param filterClass filter class]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that allows a map/red job to work on a sample of sequence files.
+ The sample is decided by the filter class set by the job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+ <!-- start interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <interface name="SequenceFileInputFilter.Filter" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filter function: decide whether a record should be accepted.
+ @param key record key
+ @return true if the record is accepted; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[filter interface]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <class name="SequenceFileInputFilter.FilterBase" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.SequenceFileInputFilter.Filter"/>
+ <constructor name="SequenceFileInputFilter.FilterBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[base class for Filters]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
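+ <!-- A minimal sketch of a custom filter built on FilterBase above: only accept()
+      plus the Configurable methods need to be supplied. The class name and the
+      even-hash acceptance rule are hypothetical.
+
+      public static class EvenHashFilter extends SequenceFileInputFilter.FilterBase {
+        private Configuration conf;
+        public void setConf(Configuration conf) { this.conf = conf; }
+        public Configuration getConf() { return conf; }
+        public boolean accept(Object key) {
+          return (key.hashCode() & 1) == 0;  // keep keys with an even hash
+        }
+      }
+ -->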
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
+ <class name="SequenceFileInputFilter.MD5Filter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.MD5Filter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+ <![CDATA[set the filtering frequency in configuration
+
+ @param conf configuration
+ @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[configure the filter according to configuration
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filtering method
+ If MD5(key) % frequency==0, return true; otherwise return false
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class returns a subset of records by examining the MD5 digest of each
+ key against a filtering frequency <i>f</i>. The filtering criterion is
+ MD5(key) % f == 0.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
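+ <!-- A minimal sketch sampling roughly one of every 10 records with the MD5 filter
+      above; the JobConf instance and the frequency value are hypothetical.
+
+      JobConf conf = new JobConf();
+      SequenceFileInputFilter.setFilterClass(conf, SequenceFileInputFilter.MD5Filter.class);
+      SequenceFileInputFilter.MD5Filter.setFrequency(conf, 10);  // accept keys with MD5(key) % 10 == 0
+ -->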
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <class name="SequenceFileInputFilter.PercentFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.PercentFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+ <![CDATA[Set the frequency and store it in conf.
+ @param conf configuration
+ @param frequency filtering frequency
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[configure the filter by checking the configuration
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filtering method
+ If record# % frequency==0, return true; otherwise return false
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class returns a percentage of records.
+ The percentage is determined by a filtering frequency <i>f</i> using
+ the criterion record# % f == 0.
+ For example, if the frequency is 10, one out of every 10 records is returned.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
+ <class name="SequenceFileInputFilter.RegexFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.RegexFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPattern"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="regex" type="java.lang.String"/>
+ <exception name="PatternSyntaxException" type="java.util.regex.PatternSyntaxException"/>
+ <doc>
+ <![CDATA[Define the filtering regex and store it in conf.
+ @param conf where the regex is set
+ @param regex regex used as a filter]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[configure the Filter by checking the configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filtering method
+ If key matches the regex, return true; otherwise return false
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Filters records by matching the key against a regex.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
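+ <!-- A minimal sketch keeping only records whose keys match a regex; the pattern
+      shown is hypothetical.
+
+      JobConf conf = new JobConf();
+      SequenceFileInputFilter.setFilterClass(conf, SequenceFileInputFilter.RegexFilter.class);
+      SequenceFileInputFilter.RegexFilter.setPattern(conf, "^user_.*");
+ -->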
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="listPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFormat -->
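+ <!-- A minimal sketch wiring SequenceFiles in as job input; the JobConf instance
+      and the input path are hypothetical.
+
+      JobConf conf = new JobConf();
+      conf.setInputFormat(SequenceFileInputFormat.class);
+      FileInputFormat.setInputPaths(conf, new Path("/data/input"));
+ -->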
+ <!-- start class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
+ <class name="SequenceFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.SequenceFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf}
+ @return the {@link CompressionType} for the output {@link SequenceFile},
+ defaulting to {@link CompressionType#RECORD}]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf} to modify
+ @param style the {@link CompressionType} for the output
+ {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
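+ <!-- A minimal sketch producing block-compressed SequenceFile output via the
+      static helpers above; the JobConf instance and output path are hypothetical.
+
+      JobConf conf = new JobConf();
+      conf.setOutputFormat(SequenceFileOutputFormat.class);
+      SequenceFileOutputFormat.setOutputCompressionType(conf, SequenceFile.CompressionType.BLOCK);
+      FileOutputFormat.setOutputPath(conf, new Path("/data/output"));
+ -->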
+ <!-- start class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+ <class name="SequenceFileRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <constructor name="SequenceFileRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of key that must be passed to {@link
+ #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of value that must be passed to {@link
+ #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split.
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="conf" type="org.apache.hadoop.conf.Configuration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link RecordReader} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer -->
+ <class name="StatusHttpServer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer" type="java.lang.String, java.lang.String, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a status server on the given port.
+ The jsp scripts are taken from src/webapps/<name>.
+ @param name The name of the server
+ @param bindAddress The address to bind the server to
+ @param port The port to use on the server
+ @param findPort whether the server should start at the given port and
+ increment by 1 until it finds a free port.]]>
+ </doc>
+ </constructor>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Set a value in the webapp context. These values are available to the jsp
+ pages as "application.getAttribute(name)".
+ @param name The name of the attribute
+ @param value The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="addServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="servletClass" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[Add a servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param servletClass The servlet class]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value in the webapp context.
+ @param name The name of the attribute
+ @return The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the port that the server is on
+ @return the port]]>
+ </doc>
+ </method>
+ <method name="setThreads"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="min" type="int"/>
+ <param name="max" type="int"/>
+ </method>
+ <method name="addSslListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="keystore" type="java.lang.String"/>
+ <param name="storPass" type="java.lang.String"/>
+ <param name="keyPass" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Configure an SSL listener on the server.
+ @param addr address to listen on
+ @param keystore location of the keystore
+ @param storPass password for the keystore
+ @param keyPass password for the key]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start the server. Does not wait for the server to start.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Stop the server.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Create an embedded Jetty server to answer HTTP requests. The primary goal
+ is to serve up status information for the server.
+ There are three contexts:
+ "/logs/" -> points to the log directory
+ "/static/" -> points to common static files (src/webapps/static)
+ "/" -> the jsp server code from (src/webapps/<name>)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer -->
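+ <!-- A minimal sketch starting a status server and registering the stack-trace
+      servlet below. Per the four-argument constructor above, the second String is
+      assumed to be the bind address; the name, address, and port are hypothetical.
+
+      StatusHttpServer server = new StatusHttpServer("task", "0.0.0.0", 50060, true);
+      server.addServlet("stacks", "/stacks", StatusHttpServer.StackServlet.class);
+      server.start();
+      int actualPort = server.getPort();  // may differ from 50060 since findPort is true
+ -->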
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer.StackServlet -->
+ <class name="StatusHttpServer.StackServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer.StackServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A very simple servlet to serve up a text representation of the current
+ stack traces. It both returns the stacks to the caller and logs them.
+ Currently the stack traces are captured sequentially, so they may not
+ reflect a single consistent snapshot.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer.StackServlet -->
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <class name="StatusHttpServer.TaskGraphServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer.TaskGraphServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="width" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[width of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="height" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[height of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="ymargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on y axis]]>
+ </doc>
+ </field>
+ <field name="xmargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on x axis]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The servlet that outputs SVG graphics for map/reduce task
+ statuses.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskAttemptID -->
+ <class name="TaskAttemptID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskAttemptID" type="org.apache.hadoop.mapred.TaskID, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from given {@link TaskID}.
+ @param taskId TaskID that this task belongs to
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskAttemptID" type="java.lang.String, int, boolean, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from the given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param taskId taskId number
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link TaskID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskAttemptID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare TaskAttemptIDs first by their TaskIDs, then by attempt numbers.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a TaskAttemptID object from a given string.
+ @return constructed TaskAttemptID object, or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <param name="attemptId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task attempt IDs. Arguments can
+ be given as null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>all task attempt IDs</i>
+ of <i>any jobtracker</i>, in <i>any job</i>, of the <i>first
+ map task</i>, we would use:
+ <pre>
+ TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+ </pre>
+ which will return:
+ <pre> "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @param attemptId the task attempt number, or null
+ @return a regex pattern matching TaskAttemptIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[TaskAttemptID represents the immutable and unique identifier for
+ a task attempt. Each task attempt is one particular instance of a Map or
+ Reduce Task identified by its TaskID.
+
+ TaskAttemptID consists of 2 parts. The first part is the
+ {@link TaskID} that this TaskAttemptID belongs to.
+ The second part is the task attempt number. <br>
+ An example TaskAttemptID is:
+ <code>attempt_200707121733_0003_m_000005_0</code>, which represents the
+ zeroth task attempt for the fifth map task in the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskAttemptID strings,
+ but should instead use the appropriate constructors or the
+ {@link #forName(String)} method.
+
+ @see JobID
+ @see TaskID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskAttemptID -->
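+ <!-- A minimal sketch parsing and matching attempt IDs, using the example string
+      from the class documentation above; the variable names are hypothetical.
+
+      TaskAttemptID id = TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
+      TaskID tip = id.getTaskID();
+      // regex matching every attempt of the first map task of any job:
+      String pattern = TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+ -->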
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent -->
+ <class name="TaskCompletionEvent" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskCompletionEvent"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor for Writable.]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskCompletionEvent" type="int, java.lang.String, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TaskCompletionEvent" type="int, org.apache.hadoop.mapred.TaskAttemptID, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor. eventId should be created externally and incremented
+ per event for each job.
+ @param eventId event id; event ids should be unique and assigned
+ incrementally, starting from 0.
+ @param taskId task id
+ @param status task's status
+ @param taskTrackerHttp task tracker's host:port for http.]]>
+ </doc>
+ </constructor>
+ <method name="getEventId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns event Id.
+ @return event id]]>
+ </doc>
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskAttemptId()} instead.">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id
+ @deprecated use {@link #getTaskAttemptId()} instead.]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptId" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id]]>
+ </doc>
+ </method>
+ <method name="getTaskStatus" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns enum Status.SUCCESS or Status.FAILURE.
+ @return the task status]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerHttp" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[HTTP location of the tasktracker where this task ran.
+ @return HTTP location of the tasktracker's user logs]]>
+ </doc>
+ </method>
+ <method name="getTaskRunTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the time (in milliseconds) the task took to complete.]]>
+ </doc>
+ </method>
+ <method name="setTaskRunTime"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskCompletionTime" type="int"/>
+ <doc>
+ <![CDATA[Set the task completion time.
+ @param taskCompletionTime time (in milliseconds) the task took to complete]]>
+ </doc>
+ </method>
+ <method name="setEventId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="eventId" type="int"/>
+ <doc>
+ <![CDATA[Set event id. Event ids should be assigned incrementally, starting from 0.
+ @param eventId]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setTaskID(TaskAttemptID)} instead.">
+ <param name="taskId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId
+ @deprecated use {@link #setTaskID(TaskAttemptID)} instead.]]>
+ </doc>
+ </method>
+ <method name="setTaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId]]>
+ </doc>
+ </method>
+ <method name="setTaskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"/>
+ <doc>
+ <![CDATA[Set task status.
+ @param status]]>
+ </doc>
+ </method>
+ <method name="setTaskTrackerHttp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTrackerHttp" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set task tracker http location.
+ @param taskTrackerHttp]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isMapTask" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="idWithinJob" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="EMPTY_ARRAY" type="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is used to track task completion events on the
+ job tracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent -->
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <class name="TaskCompletionEvent.Status" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskCompletionEvent.Status&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <!-- start class org.apache.hadoop.mapred.TaskID -->
+ <class name="TaskID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskID" type="org.apache.hadoop.mapred.JobID, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from given {@link JobID}.
+ @param jobId JobID that this tip belongs to
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskID" type="java.lang.String, int, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from the given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this tip belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare TaskIDs first by job ID, then by tip number. Reduces are
+ defined as greater than maps.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a TaskID object from a given string.
+ @return constructed TaskID object, or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getTaskIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task IDs. Arguments can
+ be given as null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>the first map task</i>
+ of <i>any jobtracker</i>, of <i>any job</i>, we would use:
+ <pre>
+ TaskID.getTaskIDsPattern(null, null, true, 1);
+ </pre>
+ which will return:
+ <pre> "task_[^_]*_[0-9]*_m_000001" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @return a regex pattern matching TaskIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[TaskID represents the immutable and unique identifier for
+ a Map or Reduce Task. Each TaskID encompasses multiple attempts made to
+ execute the Map or Reduce Task, each of which is uniquely identified by
+ its TaskAttemptID.
+
+ TaskID consists of 3 parts. The first part is the {@link JobID} that this
+ TaskInProgress belongs to. The second part of the TaskID is either 'm' or 'r',
+ representing whether the task is a map task or a reduce task.
+ The third part is the task number. <br>
+ An example TaskID is:
+ <code>task_200707121733_0003_m_000005</code>, which represents the
+ fifth map task in the third job running at the jobtracker
+ started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskID strings,
+ but should instead use the appropriate constructors or the
+ {@link #forName(String)} method.
+
+ @see JobID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskID -->
+ <!-- start class org.apache.hadoop.mapred.TaskLog -->
+ <class name="TaskLog" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLog"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskLogFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="getTaskLogFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logsRetainHours" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Purge old user logs.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskLogLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the desired maximum length of task's logs.
+ @param conf the job to look in
+ @return the number of bytes to cap the log files at]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ If the tailLength is 0, the entire output will be saved.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="setup" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ Setup commands, such as setting a memory limit, can be passed;
+ they will be executed before the command itself.
+ If the tailLength is 0, the entire output will be saved.
+ @param setup The setup commands for the exec'ed process.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="addCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="isExecutable" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add quotes to each of the command strings and
+ return them as a single string.
+ @param cmd The command to be quoted
+ @param isExecutable if true, the first argument is treated as an
+ executable and resolved to its shell path
+ @return the quoted string.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="captureDebugOut" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="debugoutFilename" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture debug script's
+ stdout and stderr to debugout.
+ @param cmd The command and the arguments that should be run
+ @param debugoutFilename The filename that stdout and stderr
+ should be saved to.
+ @return the modified command that should be run
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple logger to handle the task-specific user logs.
+ This class uses the system property <code>hadoop.log.dir</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog -->
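+ <!-- A sketch of the wrapping performed by captureOutAndError above: it
+      returns a shell command whose stdout/stderr are redirected to the given
+      files. The LogName.STDOUT/STDERR constants are assumed (the XML above
+      documents the enum but not its constants), and TaskAttemptID.forName
+      is assumed to mirror TaskID.forName.
+
+ import java.io.File;
+ import java.io.IOException;
+ import java.util.Arrays;
+ import java.util.List;
+ import org.apache.hadoop.mapred.TaskAttemptID;
+ import org.apache.hadoop.mapred.TaskLog;
+
+ public class TaskLogExample {
+   public static void main(String[] args) throws IOException {
+     TaskAttemptID taskid = TaskAttemptID.forName(args[0]);
+     File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
+     File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
+     // Wrap "echo hello" so its output lands in the task's log files;
+     // tailLength 0 keeps the entire output (see the javadoc above).
+     List<String> wrapped = TaskLog.captureOutAndError(
+         Arrays.asList("echo", "hello"), stdout, stderr, 0L);
+     System.out.println(wrapped);
+   }
+ }
+ -->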
+ <!-- start class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <class name="TaskLog.LogName" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskLog.LogName&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskLog.LogName[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskLog.LogName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The filter for userlogs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <!-- start class org.apache.hadoop.mapred.TaskLogAppender -->
+ <class name="TaskLogAppender" extends="org.apache.log4j.FileAppender"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogAppender"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="activateOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Getter/Setter methods for log4j.]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ </method>
+ <method name="getTotalLogFileSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setTotalLogFileSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logSize" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[A simple log4j-appender for the task child's
+ map-reduce system logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogAppender -->
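+ <!-- A sketch of configuring the appender above programmatically; in
+      practice it would normally be configured through a log4j properties
+      file, but the getter/setter pairs documented above map directly to
+      these calls. The task ID string is illustrative.
+
+ import org.apache.hadoop.mapred.TaskLogAppender;
+
+ public class AppenderExample {
+   public static void main(String[] args) {
+     TaskLogAppender appender = new TaskLogAppender();
+     appender.setTaskId("task_200707121733_0003_m_000005"); // illustrative ID
+     appender.setTotalLogFileSize(1024L * 1024L);           // cap at ~1 MB
+     appender.activateOptions(); // log4j hook: apply the options set above
+   }
+ }
+ -->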
+ <!-- start class org.apache.hadoop.mapred.TaskLogServlet -->
+ <class name="TaskLogServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the logs via http.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A servlet that is run by the TaskTrackers to provide the task logs via http.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskReport -->
+ <class name="TaskReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskID()} instead">
+ <doc>
+ <![CDATA[@deprecated use {@link #getTaskID()} instead]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The id of the task.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The amount completed, between zero and one.]]>
+ </doc>
+ </method>
+ <method name="getState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The most recent state, reported by a {@link Reporter}.]]>
+ </doc>
+ </method>
+ <method name="getDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A list of error messages.]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A table of counters.]]>
+ </doc>
+ </method>
+ <method name="getFinishTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the finish time of the task.
+ @return 0 if the finish time was not set, else the finish time.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start time of the task.
+ @return 0 if the start time was not set, else the start time.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A report on the state of a task.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskReport -->
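+ <!-- A sketch of reading the fields documented above from a TaskReport;
+      how the report is obtained (e.g. from a job client) is outside this
+      class and left open here.
+
+ import org.apache.hadoop.mapred.TaskReport;
+
+ public class ReportExample {
+   static void print(TaskReport report) {
+     System.out.println(report.getTaskID() + " " + report.getState());
+     System.out.println("progress: " + report.getProgress()); // 0.0 to 1.0
+     for (String diag : report.getDiagnostics()) {
+       System.out.println("error: " + diag);
+     }
+     // 0 means the time was never set (see the javadoc above).
+     System.out.println("started: " + report.getStartTime());
+   }
+ }
+ -->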
+ <!-- start class org.apache.hadoop.mapred.TaskTracker -->
+ <class name="TaskTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.TaskUmbilicalProtocol"/>
+ <implements name="java.lang.Runnable"/>
+ <constructor name="TaskTracker" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start with the local machine name and the default JobTracker.]]>
+ </doc>
+ </constructor>
+ <method name="getTaskTrackerMetrics" return="org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cleanupStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Removes all contents of temporary storage. Called upon
+ startup, to remove any leftovers from a previous run.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close down the TaskTracker and all its components. We must also shut down
+ any running tasks or threads, and clean up disk space. A new TaskTracker
+ within the same process space might be restarted, so everything must be
+ clean.]]>
+ </doc>
+ </method>
+ <method name="getJobClient" return="org.apache.hadoop.mapred.InterTrackerProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The connection to the JobTracker, used by the TaskRunner
+ for locating remote files.]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerReportAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the address to which the tasktracker is bound.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The server retry loop.
+ This while-loop attempts to connect to the JobTracker. It only
+ loops when the old TaskTracker has gone bad (its state is
+ stale somehow) and we need to reinitialize everything.]]>
+ </doc>
+ </method>
+ <method name="getTask" return="org.apache.hadoop.mapred.Task"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTask" return="org.apache.hadoop.mapred.Task"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called upon startup by the child process, to fetch Task data.]]>
+ </doc>
+ </method>
+ <method name="statusUpdate" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="status" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="statusUpdate" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskStatus" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called periodically to report Task progress, from 0.0 to 1.0.]]>
+ </doc>
+ </method>
+ <method name="reportDiagnosticInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="info" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportDiagnosticInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="info" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when the task dies before completion, and we want to report back
+ diagnostic info.]]>
+ </doc>
+ </method>
+ <method name="ping" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ping" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Child checking to see if we're alive. Normally does nothing.]]>
+ </doc>
+ </method>
+ <method name="done"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="shouldPromote" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="done"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldPromote" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The task is done.]]>
+ </doc>
+ </method>
+ <method name="shuffleError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="shuffleError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A reduce-task failed to shuffle the map-outputs. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="fsError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="fsError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A child task had a local filesystem error. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="getMapCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="fromid" type="int"/>
+ <param name="maxlocs" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMapCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxLocs" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mapOutputLost"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mapOutputLost"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="errorMsg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A completed map task's output has been lost.]]>
+ </doc>
+ </method>
+ <method name="isIdle" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this task tracker idle?
+ @return has this task tracker finished and cleaned up all of its tasks?]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Start the TaskTracker, pointing it at the indicated JobTracker.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[TaskTracker is a process that starts and tracks MR Tasks
+ in a networked environment. It contacts the JobTracker
+ for Task assignments and to report results.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker -->
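+ <!-- A sketch of running a TaskTracker in-process using the constructor and
+      run() documented above; the real entry point is main(String[]). The
+      default JobConf constructor is assumed to pick up the site
+      configuration.
+
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.TaskTracker;
+
+ public class TrackerExample {
+   public static void main(String[] args) throws Exception {
+     JobConf conf = new JobConf();                // reads the site configuration
+     TaskTracker tracker = new TaskTracker(conf); // connects to the JobTracker
+     tracker.run(); // retry loop: reconnects if the tracker state goes stale
+   }
+ }
+ -->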
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.Child -->
+ <class name="TaskTracker.Child" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskTracker.Child"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ </method>
+ <doc>
+ <![CDATA[The main() for child processes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.Child -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <class name="TaskTracker.MapOutputServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskTracker.MapOutputServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in TaskTracker's Jetty to serve the map outputs
+ to other nodes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics -->
+ <class name="TaskTracker.TaskTrackerMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics -->
+ <!-- start class org.apache.hadoop.mapred.TextInputFormat -->
+ <class name="TextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="TextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return is used to signal the end of a line. Keys are
+ the position in the file, and values are the line of text.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextInputFormat -->
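+ <!-- A sketch of wiring the format above into a job; the key/value types
+      follow from the generic parameters documented above (LongWritable file
+      offsets as keys, Text lines as values). The input path is illustrative.
+
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.mapred.FileInputFormat;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.TextInputFormat;
+
+ public class InputExample {
+   static JobConf configure(JobConf conf) {
+     conf.setInputFormat(TextInputFormat.class);
+     FileInputFormat.addInputPath(conf, new Path("/data/in")); // illustrative
+     // Mappers then receive (LongWritable position, Text line) pairs.
+     return conf;
+   }
+ }
+ -->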
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat -->
+ <class name="TextOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes plain text files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+ <class name="TextOutputFormat.LineRecordWriter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"/>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+</package>
+<package name="org.apache.hadoop.mapred.jobcontrol">
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.Job -->
+ <class name="Job" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf, java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+ @param jobConf a mapred job configuration representing a job to be executed.
+ @param dependingJobs a list of jobs the current job depends on]]>
+ </doc>
+ </constructor>
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+
+ @param jobConf mapred job configuration representing a job to be executed.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job name of this job]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job name for this job.
+ @param jobName the job name]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job ID of this job assigned by JobControl]]>
+ </doc>
+ </method>
+ <method name="setJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job ID for this job.
+ @param id the job ID]]>
+ </doc>
+ </method>
+ <method name="getMapredJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getAssignedJobID()} instead">
+ <doc>
+ <![CDATA[@return the mapred ID of this job
+ @deprecated use {@link #getAssignedJobID()} instead]]>
+ </doc>
+ </method>
+ <method name="setMapredJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setAssignedJobID(JobID)} instead">
+ <param name="mapredJobID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job.
+ @param mapredJobID the mapred job ID for this job.
+ @deprecated use {@link #setAssignedJobID(JobID)} instead]]>
+ </doc>
+ </method>
+ <method name="getAssignedJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred ID of this job as assigned by the
+ mapred framework.]]>
+ </doc>
+ </method>
+ <method name="setAssignedJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mapredJobID" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job as assigned by the
+ mapred framework.
+ @param mapredJobID the mapred job ID for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred job conf of this job]]>
+ </doc>
+ </method>
+ <method name="setJobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Set the mapred job conf for this job.
+ @param jobConf the mapred job conf for this job.]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the state of this job]]>
+ </doc>
+ </method>
+ <method name="setState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Set the state for this job.
+ @param state the new state for this job.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the message of this job]]>
+ </doc>
+ </method>
+ <method name="setMessage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="message" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the message for this job.
+ @param message the message for this job.]]>
+ </doc>
+ </method>
+ <method name="getDependingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the depending jobs of this job]]>
+ </doc>
+ </method>
+ <method name="addDependingJob" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dependingJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a job to this job's dependency list. Dependent jobs can only be added while a Job
+ is waiting to run, not during or afterwards.
+
+ @param dependingJob Job that this Job depends on.
+ @return <tt>true</tt> if the Job was added.]]>
+ </doc>
+ </method>
+ <method name="isCompleted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in a complete state]]>
+ </doc>
+ </method>
+ <method name="isReady" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in READY state]]>
+ </doc>
+ </method>
+ <method name="submit"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Submit this job to mapred. The state becomes RUNNING if submission
+ is successful, FAILED otherwise.]]>
+ </doc>
+ </method>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WAITING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEPENDENT_FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class encapsulates a MapReduce job and its dependencies. It monitors
+ the states of the depending jobs and updates the state of this job.
+ A job starts in the WAITING state. If it does not have any depending jobs, or
+ all of the depending jobs are in the SUCCESS state, then the job state will become
+ READY. If any depending job fails, the job will fail too.
+ When in the READY state, the job can be submitted to Hadoop for execution, with
+ the state changing into the RUNNING state. From the RUNNING state, the job can get into
+ the SUCCESS or FAILED state, depending on the status of the job execution.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.Job -->
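+ <!-- A sketch of the dependency wiring described above: job B is not READY
+      until job A reaches the SUCCESS state. The JobConf instances are
+      assumed to be fully configured elsewhere.
+
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.jobcontrol.Job;
+
+ public class DependencyExample {
+   static Job chain(JobConf confA, JobConf confB) throws IOException {
+     Job a = new Job(confA);  // starts in the WAITING state
+     Job b = new Job(confB);
+     b.addDependingJob(a);    // b becomes READY only after a succeeds
+     return b;
+   }
+ }
+ -->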
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.JobControl -->
+ <class name="JobControl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobControl" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a job control for a group of jobs.
+ @param groupName a name identifying this group]]>
+ </doc>
+ </constructor>
+ <method name="getWaitingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the waiting state]]>
+ </doc>
+ </method>
+ <method name="getRunningJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the running state]]>
+ </doc>
+ </method>
+ <method name="getReadyJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the ready state]]>
+ </doc>
+ </method>
+ <method name="getSuccessfulJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the success state]]>
+ </doc>
+ </method>
+ <method name="getFailedJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="addJob" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="aJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a new job.
+ @param aJob the new job]]>
+ </doc>
+ </method>
+ <method name="addJobs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobs" type="java.util.Collection&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"/>
+ <doc>
+ <![CDATA[Add a collection of jobs.
+
+ @param jobs the jobs to add]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the thread state]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set the thread state to STOPPING so that the
+ thread will stop when it wakes up.]]>
+ </doc>
+ </method>
+ <method name="suspend"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Suspend the running thread.]]>
+ </doc>
+ </method>
+ <method name="resume"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resume the suspended thread.]]>
+ </doc>
+ </method>
+ <method name="allFinished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The main loop for the thread.
+ The loop does the following:
+ Check the states of the running jobs
+ Update the states of waiting jobs
+ Submit the jobs in ready state]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a set of MapReduce jobs and their dependencies. It tracks
+ the states of the jobs by placing them into different tables according to their
+ states.
+
+ This class provides APIs for the client app to add a job to the group and to get
+ the jobs in the group in different states. When a
+ job is added, an ID unique to the group is assigned to the job.
+
+ This class has a thread that submits jobs when they become ready, monitors the
+ states of the running jobs, and updates the states of jobs based on the state
+ changes of their depending jobs. The class provides APIs for suspending/resuming
+ the thread, and for stopping the thread.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.JobControl -->
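+ <!-- A sketch of driving the class above: since JobControl is a Runnable,
+      it is typically run on its own thread while the caller polls
+      allFinished() and then stops it. The group name is illustrative.
+
+ import org.apache.hadoop.mapred.jobcontrol.Job;
+ import org.apache.hadoop.mapred.jobcontrol.JobControl;
+
+ public class ControlExample {
+   static void runGroup(Job... jobs) throws InterruptedException {
+     JobControl control = new JobControl("example-group");
+     for (Job job : jobs) {
+       control.addJob(job);
+     }
+     Thread thread = new Thread(control);
+     thread.start();
+     while (!control.allFinished()) {
+       Thread.sleep(1000); // poll until every job reaches a final state
+     }
+     control.stop(); // sets the thread state to STOPPING
+     System.out.println("failed jobs: " + control.getFailedJobs());
+   }
+ }
+ -->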
+</package>
+<package name="org.apache.hadoop.mapred.join">
+ <!-- start class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+ <class name="ArrayListBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="ArrayListBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayListBackedIterator" type="java.util.ArrayList&lt;X&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. The
+ implementation uses an {@link java.util.ArrayList} to store elements
+ added to it, replaying them as requested.
+ Prefer {@link StreamBackedIterator}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
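+ <!-- A sketch of the add/iterate/reset cycle for the iterator above, using
+      Text values. next(X) is assumed to copy the current element into the
+      object provided, as is usual for Writable-based iterators.
+
+ import java.io.IOException;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.join.ArrayListBackedIterator;
+
+ public class IteratorExample {
+   public static void main(String[] args) throws IOException {
+     ArrayListBackedIterator<Text> it = new ArrayListBackedIterator<Text>();
+     it.add(new Text("a"));
+     it.add(new Text("b"));
+     Text val = new Text();
+     while (it.hasNext()) {
+       it.next(val);           // fills val with the next stored element
+       System.out.println(val);
+     }
+     it.reset();               // rewind; the elements can be replayed
+     it.close();
+   }
+ }
+ -->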
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <interface name="ComposableInputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Refinement of InputFormat requiring implementors to provide
+ ComposableRecordReader instead of RecordReader.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <interface name="ComposableRecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <implements name="java.lang.Comparable&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key this RecordReader would supply on a call to next(K,V).]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RecordReader into the object provided.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the stream is not empty, but provides no guarantee that
+ a call to next(K,V) will succeed.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[While key-value pairs from this RecordReader match the given key, register
+ them with the JoinCollector provided.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Additional operations required of a RecordReader to participate in a join.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputFormat -->
+ <class name="CompositeInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="CompositeInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Interpret a given string as a composite expression.
+ {@code
+ func ::= <ident>([<func>,]*<func>)
+ func ::= tbl(<class>,"<path>")
+ class ::= @see java.lang.Class#forName(java.lang.String)
+ path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
+ }
+ Reads expression from the <tt>mapred.join.expr</tt> property and
+ user-supplied join types from <tt>mapred.join.define.&lt;ident&gt;</tt>
+ types. Paths supplied to <tt>tbl</tt> are given as input paths to the
+ InputFormat class listed.
+ @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]>
+ </doc>
+ </method>
+ <method name="addDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds the default set of identifiers to the parser.]]>
+ </doc>
+ </method>
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify that this composite has children and that all its children
+ can validate their input.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a CompositeInputSplit from the child InputFormats by assigning the
+ ith split from each child to the ith composite split.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a CompositeRecordReader for the children of this InputFormat
+ as defined in the init expression.
+ The outermost join need only be composable, not necessarily a composite.
+ Mandating TupleWritable isn't strictly correct.]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given InputFormat class (inf), path (p) return:
+ {@code tbl(<inf>, <p>) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), InputFormat class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), InputFormat class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat capable of performing joins over a set of data sources sorted
+ and partitioned the same way.
+ @see #setFormat
+
+ A user may define new join types by setting the property
+ <tt>mapred.join.define.&lt;ident&gt;</tt> to a classname. In the expression
+ <tt>mapred.join.expr</tt>, the identifier will be assumed to be a
+ ComposableRecordReader.
+ <tt>mapred.join.keycomparator</tt> can be a classname used to compare keys
+ in the join.
+ @see JoinRecordReader
+ @see MultiFilterRecordReader]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputFormat -->
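+ <!-- A sketch of the compose() helpers above: build a join expression over
+      two sorted, identically partitioned inputs and store it under the
+      mapred.join.expr property that setFormat() reads. The "inner" join
+      identifier and the paths are illustrative assumptions.
+
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.SequenceFileInputFormat;
+ import org.apache.hadoop.mapred.join.CompositeInputFormat;
+
+ public class JoinExample {
+   static void configure(JobConf conf) {
+     String expr = CompositeInputFormat.compose(
+         "inner", SequenceFileInputFormat.class,
+         new Path("/data/a"), new Path("/data/b"));
+     conf.set("mapred.join.expr", expr);
+     conf.setInputFormat(CompositeInputFormat.class);
+   }
+ }
+ -->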
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputSplit -->
+ <class name="CompositeInputSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="CompositeInputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CompositeInputSplit" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.mapred.InputSplit"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an InputSplit to this collection.
+ @throws IOException If capacity was not specified during construction
+ or if capacity has been reached.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the aggregate length of all child InputSplits currently added.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the length of ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Collect a set of hosts from all child InputSplits.]]>
+ </doc>
+ </method>
+ <method name="getLocation" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[getLocations from ith InputSplit.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write splits in the following format.
+ {@code
+ <count><class1><class2>...<classn><split1><split2>...<splitn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}
+ @throws IOException If the child InputSplit cannot be read, typically
+ for failing access checks.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This InputSplit contains a set of child InputSplits. Any InputSplit inserted
+ into this collection must have a public default constructor.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputSplit -->
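+ <!-- A minimal sketch of the CompositeInputSplit contract described above: capacity
+      must be supplied at construction for add() to succeed, and getLength() is the
+      aggregate over the children. childA and childB stand in for real InputSplits.
+
+        CompositeInputSplit split = new CompositeInputSplit(2);
+        split.add(childA);               // throws IOException once capacity is hit
+        split.add(childB);
+        long total = split.getLength();  // aggregate length of both children
+ -->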
+ <!-- start class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <class name="CompositeRecordReader" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="CompositeRecordReader" type="int, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a RecordReader with <tt>capacity</tt> children to position
+ <tt>id</tt> in the parent reader.
+ The id of a root CompositeRecordReader is -1 by convention, but relying
+ on this is not recommended.]]>
+ </doc>
+ </constructor>
+ <method name="combine" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ </method>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordReaderQueue" return="java.util.PriorityQueue&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return sorted list of RecordReaders for this composite.]]>
+ </doc>
+ </method>
+ <method name="getComparator" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return comparator defining the ordering for RecordReaders in this
+ composite.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rr" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ? extends V&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a RecordReader to this collection.
+ The id() of a RecordReader determines where in the Tuple its
+ entry will appear. Adding RecordReaders with the same id has
+ undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key for the current join or the value at the top of the
+ RecordReader heap.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the top of this RR into the given object.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if it is possible that this could emit more values.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Pass skip key to child RRs.]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Obtain an iterator over the child RRs apropos of the value type
+ ultimately emitted from this join.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[If the key provided matches that of this Composite, give the
+ JoinCollector an iterator over the values it may emit.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For all child RRs offering the key provided, obtain an iterator
+ at that position in the JoinCollector.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key of join or head of heap
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new key value common to all child RRs.
+ @throws ClassCastException if key classes differ.]]>
+ </doc>
+ </method>
+ <method name="createInternalValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a value to be used internally for joins.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unsupported (returns zero in all cases).]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all child RRs.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report progress as the minimum of all child RR progress.]]>
+ </doc>
+ </method>
+ <field name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, X&gt;.JoinCollector"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="kids" type="org.apache.hadoop.mapred.join.ComposableRecordReader[]"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A RecordReader that can effect joins of RecordReaders sharing a common key
+ type and partitioning.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <class name="InnerJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Return true iff the tuple is full (all data sources contain this key).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full inner join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <class name="JoinRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, org.apache.hadoop.io.Writable, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Emit the next set of key, value pairs as defined by the child
+ RecordReaders and operation associated with this composite RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator wrapping the JoinCollector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite joins returning Tuples of arbitrary Writables.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <class name="JoinRecordReader.JoinDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader.JoinDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Since the JoinCollector is effecting our operation, we need only
+ provide an iterator proxy wrapping its operation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+ <class name="MultiFilterRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"/>
+ <constructor name="MultiFilterRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For each tuple emitted, return a value (typically one of the values
+ in the tuple).
+ Modifying the Writables in the tuple is permitted and unlikely to affect
+ join behavior in most cases, but it is not recommended. It's safer to
+ clone first.]]>
+ </doc>
+ </method>
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Default implementation offers {@link #emit} every Tuple from the
+ collector (the outer join of child RRs).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator returning a single value from the tuple.
+ @see MultiFilterDelegationIterator]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite joins returning values derived from multiple
+ sources, but generally not tuples.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <class name="MultiFilterRecordReader.MultiFilterDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"/>
+ <constructor name="MultiFilterRecordReader.MultiFilterDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy the JoinCollector, but include a callback to emit.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <class name="OuterJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit everything from the collector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full outer join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.OverrideRecordReader -->
+ <class name="OverrideRecordReader" extends="org.apache.hadoop.mapred.join.MultiFilterRecordReader&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit the value with the highest position in the tuple.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instead of filling the JoinCollector with iterators from all
+ data sources, fill only the rightmost for this key.
+ This not only saves space by discarding the other sources, but
+ it also emits only as many key-value pairs as the preferred
+ RecordReader contains, instead of repeating that stream n times, where
+ n is the cardinality of the cross product of the discarded
+ streams for the given key.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Prefer the &quot;rightmost&quot; data source for this key.
+ For example, <tt>override(S1,S2,S3)</tt> will prefer values
+ from S3 over S2, and values from S2 over S1 for all keys
+ emitted from all sources.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OverrideRecordReader -->
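+ <!-- Sketch of an override expression (paths and input format are illustrative
+      assumptions): values from /data/s3 shadow /data/s2, which shadows /data/s1,
+      per the "rightmost source wins" rule above.
+
+        job.set("mapred.join.expr", CompositeInputFormat.compose(
+            "override", SequenceFileInputFormat.class,
+            "/data/s1", "/data/s2", "/data/s3"));
+ -->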
+ <!-- start class org.apache.hadoop.mapred.join.Parser -->
+ <class name="Parser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Very simple shift-reduce parser for join expressions.
+
+ This should be sufficient for the user extension permitted now, but ought to
+ be replaced with a parser generator if more complex grammars are supported.
+ In particular, this &quot;shift-reduce&quot; parser has no states. Each set
+ of formals requires a different internal node type, which is responsible for
+ interpreting the list of tokens it receives. This is sufficient for the
+ current grammar, but it has several annoying properties that might inhibit
+ extension. In particular, parentheses always denote function calls; an
+ algebraic or filter grammar would not only require a new node type, but
+ would also have to work around the internals of this parser.
+
+ For most other cases, adding classes to the hierarchy (particularly by
+ extending JoinRecordReader and MultiFilterRecordReader) is fairly
+ straightforward. One need only override the relevant method(s) (usually only
+ {@link CompositeRecordReader#combine}) and include a property to map its
+ value to an identifier in the parser.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser -->
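+ <!-- A hypothetical new join type, following the extension route described above:
+      subclass JoinRecordReader, override combine(), and map an identifier to the
+      class via mapred.join.define.<ident>. SemiJoinReader and the "semi"
+      identifier are invented for illustration.
+
+        import java.io.IOException;
+        import org.apache.hadoop.io.WritableComparable;
+        import org.apache.hadoop.io.WritableComparator;
+        import org.apache.hadoop.mapred.JobConf;
+        import org.apache.hadoop.mapred.join.JoinRecordReader;
+        import org.apache.hadoop.mapred.join.TupleWritable;
+
+        public class SemiJoinReader<K extends WritableComparable>
+            extends JoinRecordReader<K> {
+          public SemiJoinReader(int id, JobConf conf, int capacity,
+              Class<? extends WritableComparator> cmpcl) throws IOException {
+            super(id, conf, capacity, cmpcl);
+          }
+          // Emit a tuple only when the first source contributed a value.
+          protected boolean combine(Object[] srcs, TupleWritable dst) {
+            return dst.has(0);
+          }
+        }
+
+        // Makes "semi(tbl(...),tbl(...))" legal in mapred.join.expr:
+        job.set("mapred.join.define.semi", SemiJoinReader.class.getName());
+ -->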
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Node -->
+ <class name="Parser.Node" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat"/>
+ <constructor name="Parser.Node" type="java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addIdentifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="ident" type="java.lang.String"/>
+ <param name="mcstrSig" type="java.lang.Class[]"/>
+ <param name="nodetype" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.Parser.Node&gt;"/>
+ <param name="cl" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;"/>
+ <exception name="NoSuchMethodException" type="java.lang.NoSuchMethodException"/>
+ <doc>
+ <![CDATA[For a given identifier, add a mapping to the nodetype for the parse
+ tree and to the ComposableRecordReader to be created, including the
+ formals required to invoke the constructor.
+ The nodetype and constructor signature should be filled in from the
+ child node.]]>
+ </doc>
+ </method>
+ <method name="setID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="int"/>
+ </method>
+ <method name="setKeyComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"/>
+ </method>
+ <field name="rrCstrMap" type="java.util.Map&lt;java.lang.String, java.lang.reflect.Constructor&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;&gt;"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ident" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Node -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <class name="Parser.NodeToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <class name="Parser.NumToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.NumToken" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <class name="Parser.StrToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.StrToken" type="org.apache.hadoop.mapred.join.Parser.TType, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Token -->
+ <class name="Parser.Token" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getType" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Tagged-union type for tokens from the join expression.
+ @see Parser.TType]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Token -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.TType -->
+ <class name="Parser.TType" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.join.Parser.TType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.join.Parser.TType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.TType -->
+ <!-- start interface org.apache.hadoop.mapred.join.ResetableIterator -->
+ <interface name="ResetableIterator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True iff a call to next will succeed.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign next value to actual.
+ It is required that elements added to a ResetableIterator be returned in
+ the same order after a call to {@link #reset} (FIFO).
+
+ Note that a call to this may fail for nested joins (i.e., more elements
+ are available, but none satisfy the constraints of the join).]]>
+ </doc>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign last value returned to actual.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set iterator to return to the start of its range. Must be called after
+ calling {@link #add} to avoid a ConcurrentModificationException.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an element to the collection of elements to iterate over.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close datasources and release resources. Calling methods on the iterator
+ after calling close has undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Close datasources, but do not release internal resources. Calling this
+ method should permit the object to be reused with a different datasource.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This defines an interface to a stateful Iterator that can replay elements
+ added to it directly.
+ Note that this does not extend {@link java.util.Iterator}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ResetableIterator -->
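+ <!-- A minimal sketch of the add/reset/next contract above, using the
+      StreamBackedIterator implementation that appears later in this package; the
+      Text values are illustrative.
+
+        import java.io.IOException;
+        import org.apache.hadoop.io.Text;
+        import org.apache.hadoop.mapred.join.ResetableIterator;
+        import org.apache.hadoop.mapred.join.StreamBackedIterator;
+
+        static void demo() throws IOException {
+          ResetableIterator<Text> it = new StreamBackedIterator<Text>();
+          it.add(new Text("a"));
+          it.add(new Text("b"));
+          it.reset();                  // required after add(), before iterating
+          Text val = new Text();
+          while (it.hasNext()) {
+            it.next(val);              // returns values in FIFO order: "a", "b"
+          }
+          it.close();
+        }
+ -->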
+ <!-- start class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <class name="ResetableIterator.EMPTY" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;U&gt;"/>
+ <constructor name="ResetableIterator.EMPTY"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <!-- start class org.apache.hadoop.mapred.join.StreamBackedIterator -->
+ <class name="StreamBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="StreamBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. This
+ implementation uses a byte array to store elements added to it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.StreamBackedIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.TupleWritable -->
+ <class name="TupleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="TupleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty tuple with no allocated storage for writables.]]>
+ </doc>
+ </constructor>
+ <constructor name="TupleWritable" type="org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Initialize tuple with storage; unknown whether any of them contain
+ &quot;written&quot; values.]]>
+ </doc>
+ </constructor>
+ <method name="has" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Return true if tuple has an element at the position provided.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get ith Writable from Tuple.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of children in this Tuple.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator over the elements in this tuple.
+ Note that this doesn't flatten the tuple; one may receive tuples
+ from this iterator.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert Tuple to String as in the following.
+ <tt>[<child1>,<child2>,...,<childn>]</tt>]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes each Writable to <code>out</code>.
+ TupleWritable format:
+ {@code
+ <count><type1><type2>...<typen><obj1><obj2>...<objn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.TupleWritable -->
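+ <!-- Sketch of consuming a TupleWritable inside a Mapper<Text, TupleWritable,
+      Text, Text> over join output; the Text key type and the collect() call are
+      illustrative assumptions. Positions follow the id order of the joined
+      sources, and nested joins yield nested tuples.
+
+        public void map(Text key, TupleWritable value,
+            OutputCollector<Text, Text> output, Reporter reporter)
+            throws IOException {
+          for (int i = 0; i < value.size(); ++i) {
+            if (value.has(i)) {                  // did source i contribute?
+              Writable w = value.get(i);         // may itself be a TupleWritable
+              output.collect(key, new Text(w.toString()));
+            }
+          }
+        }
+ -->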
+ <!-- start class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+ <class name="WrappedRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, U&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key at the head of this RR.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RR into the object supplied.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if the RR (including the k,v pair stored in this object)
+ is not exhausted.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next k,v pair into the head of this object; return true iff
+ neither the RR nor this object is exhausted.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an iterator to the collector at the position occupied by this
+ RecordReader over the values in this stream paired with the key
+ provided (i.e., register a stream of values from this source matching K
+ with a collector).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write key-value pair at the head of this stream to the objects provided;
+ get next key-value pair from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new key from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="U extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new value from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request progress from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request position from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Forward close request to proxied RR.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key at head of proxied RR
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true iff compareTo(other) returns 0.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy class for a RecordReader participating in the join framework.
+ This class keeps track of the &quot;head&quot; key-value pair for the
+ provided RecordReader and keeps a store of values matching a key when
+ this source is participating in a join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+</package>
+<package name="org.apache.hadoop.mapred.lib">
+ <!-- start class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
+ <class name="FieldSelectionMapReduce" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="FieldSelectionMapReduce"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The identity function. The input key/value pair is written directly to output.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements a mapper/reducer that can be used to perform
+ field selections in a manner similar to Unix cut. The input data is treated
+ as fields separated by a user-specified separator (the default value is
+ "\t"). The user can specify a list of fields that form the map output keys,
+ and a list of fields that form the map output values. If the input format is
+ TextInputFormat, the mapper ignores the key to the map function, and the
+ fields are taken from the value only. Otherwise, the fields are the union of
+ those from the key and those from the value.
+
+ The field separator is under attribute "mapred.data.field.separator".
+
+ The map output field list spec is under attribute "map.output.key.value.fields.spec".
+ The value is expected to be of the form "keyFieldsSpec:valueFieldsSpec".
+ keyFieldsSpec and valueFieldsSpec are comma (,) separated lists of field specs:
+ fieldSpec,fieldSpec,fieldSpec ...
+ Each field spec can be a simple number (e.g. 5) specifying a specific field, a range
+ (like 2-5) specifying a range of fields, or an open range (like 3-) specifying all
+ the fields starting from field 3. An open range field spec applies to value fields
+ only; it has no effect on the key fields.
+
+ Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies fields 4,3,0 and 1 for keys,
+ and fields 6,5,1,2,3,7 and above for values.
+
+ The reduce output field list spec is under attribute "reduce.output.key.value.fields.spec".
+
+ The reducer extracts output key/value pairs in a similar manner, except that
+ the key is never ignored.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
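+ <!-- Configuration sketch for the field-selection specs described above; the map
+      spec is the example value from the description, and the reduce spec is an
+      illustrative assumption.
+
+        JobConf job = new JobConf();
+        job.setMapperClass(FieldSelectionMapReduce.class);
+        job.setReducerClass(FieldSelectionMapReduce.class);
+        job.set("mapred.data.field.separator", "\t");
+        // keys from fields 4,3,0,1; values from fields 6,5,1,2,3 and 7 onward
+        job.set("map.output.key.value.fields.spec", "4,3,0,1:6,5,1-3,7-");
+        // keys from field 0; values from field 1 onward
+        job.set("reduce.output.key.value.fields.spec", "0:1-");
+ -->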
+ <!-- start class org.apache.hadoop.mapred.lib.HashPartitioner -->
+ <class name="HashPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="HashPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partition keys by their {@link Object#hashCode()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.HashPartitioner -->
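+  <!-- Illustrative sketch (not part of the generated JDiff record): the hashCode-based
+       partition computation described above. A typical implementation masks off the sign
+       bit before taking the modulus so the result is a valid partition index; treat this
+       as a sketch of the idea rather than the exact shipped source.
+
+         public int getPartition(K2 key, V2 value, int numReduceTasks) {
+           // Mask to a non-negative value, then map into [0, numReduceTasks).
+           return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
+         }
+  -->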
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <class name="IdentityMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[The identity function. The input key/value pair is written directly to
+    the output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implements the identity function, mapping inputs directly to outputs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <class name="IdentityReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;V&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes all keys and values directly to output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Performs no reduction, writing all input values directly to the output.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <class name="InverseMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, V, K&gt;"/>
+ <constructor name="InverseMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;V, K&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The inverse function. Input keys and values are swapped.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that swaps keys and values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+ <class name="KeyFieldBasedPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="KeyFieldBasedPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
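+  <!-- Illustrative sketch (not part of the generated JDiff record): selecting this
+       partitioner and choosing key fields. The property names below
+       ("map.output.key.field.separator" and "mapred.text.key.partitioner.options") are
+       recalled from the Hadoop streaming documentation, not from the descriptor above,
+       so treat them as assumptions.
+
+         JobConf job = new JobConf();
+         job.setPartitionerClass(KeyFieldBasedPartitioner.class);
+         job.set("map.output.key.field.separator", ".");
+         // Partition on the first two "."-separated fields of the key.
+         job.set("mapred.text.key.partitioner.options", "-k1,2");
+  -->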
+ <!-- start class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <class name="LongSumReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, org.apache.hadoop.io.LongWritable, K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="LongSumReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Reducer} that sums long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+ <class name="MultipleOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Create a composite record writer that can write key/value data to different
+    output files.
+
+ @param fs
+ the file system to use
+ @param job
+ the job conf for the job
+ @param name
+ the leaf file name for the output file (such as part-00000")
+ @param arg3
+ a progressable for reporting progress.
+ @return a composite record writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="generateLeafFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the leaf name for the output file name. The default behavior does
+    not change the leaf file name (such as "part-00000").
+
+ @param name
+ the leaf file name for the output file
+ @return the given leaf file name]]>
+ </doc>
+ </method>
+ <method name="generateFileNameForKeyValue" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Generate the output file name based on the given key and the leaf file
+ name. The default behavior is that the file name does not depend on the
+ key.
+
+ @param key
+ the key of the output data
+ @param name
+ the leaf file name
+ @return generated file name]]>
+ </doc>
+ </method>
+ <method name="generateActualKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <doc>
+ <![CDATA[Generate the actual key from the given key/value. The default behavior is that
+    the actual key is equal to the given key.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual key derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="generateActualValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <doc>
+ <![CDATA[Generate the actual value from the given key and value. The default behavior is that
+    the actual value is equal to the given value.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual value derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="getInputFileBasedOutputFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Generate the output file name based on a given name and the input file name. If
+    the map input file does not exist (i.e. this is not a map-only job),
+    the given name is returned unchanged. If the config value for
+    "num.of.trailing.legs.to.use" is not set, or is set to 0 or a negative value, the given
+    name is returned unchanged. Otherwise, return a file name consisting of the
+    N trailing legs of the input file name, where N is the config value for
+    "num.of.trailing.legs.to.use".
+
+ @param job
+ the job config
+ @param name
+ the output file name
+    @return the output file name based on the given name and the input file name.]]>
+ </doc>
+ </method>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param fs
+ the file system to use
+ @param job
+ a job conf object
+ @param name
+ the name of the file over which a record writer object will be
+ constructed
+ @param arg3
+ a progressable object
+ @return A RecordWriter object over the given file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[This abstract class extends the OutputFormatBase, allowing the
+    output data to be written to different output files. There are three basic use cases for
+ this class.
+
+    Case one: This class is used for a map/reduce job with at least one reducer.
+ The reducer wants to write data to different files depending on the actual
+ keys. It is assumed that a key (or value) encodes the actual key (value)
+ and the desired location for the actual key (value).
+
+ Case two: This class is used for a map only job. The job wants to use an
+ output file name that is either a part of the input file name of the input
+ data, or some derivation of it.
+
+ Case three: This class is used for a map only job. The job wants to use an
+    output file name that depends on both the keys and the input file name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <class name="MultipleSequenceFileOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleSequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[This class extends the MultipleOutputFormat, allowing the output data
+    to be written to different output files in SequenceFile output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <class name="MultipleTextOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleTextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[This class extends the MultipleOutputFormat, allowing the output
+    data to be written to different output files in Text output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
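+  <!-- Illustrative sketch (not part of the generated JDiff record): use case one from the
+       MultipleOutputFormat javadoc above, as a subclass of MultipleTextOutputFormat that
+       routes each record to a file named after its key by overriding
+       generateFileNameForKeyValue. The subclass name is hypothetical.
+
+         public class KeyedTextOutputFormat extends MultipleTextOutputFormat<Text, Text> {
+           protected String generateFileNameForKeyValue(Text key, Text value, String name) {
+             // Prefix the leaf name (e.g. "part-00000") with the key, giving one
+             // output directory per distinct key.
+             return key.toString() + "/" + name;
+           }
+         }
+  -->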
+ <!-- start class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
+ <class name="MultithreadedMapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MultithreadedMapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[Multithreaded implementation of {@link org.apache.hadoop.mapred.MapRunnable}.
+    <p>
+    It can be used instead of the default implementation,
+    {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not CPU
+    bound, in order to improve throughput.
+    <p>
+    Map implementations using this MapRunnable must be thread-safe.
+    <p>
+    The Map-Reduce job has to be configured to use this MapRunnable class (using
+    the JobConf.setMapRunnerClass method), and the number of threads the
+    thread-pool can use is set with the
+    <code>mapred.map.multithreadedrunner.threads</code> property; its default
+    value is 10 threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
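+  <!-- Illustrative sketch (not part of the generated JDiff record): enabling the
+       multithreaded runner using the JobConf method and property named in the javadoc
+       above. MyMapper is a hypothetical thread-safe Mapper implementation.
+
+         JobConf job = new JobConf();
+         job.setMapperClass(MyMapper.class);  // must be thread-safe
+         job.setMapRunnerClass(MultithreadedMapRunner.class);
+         job.setInt("mapred.map.multithreadedrunner.threads", 20);  // default is 10
+  -->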
+ <!-- start class org.apache.hadoop.mapred.lib.NLineInputFormat -->
+ <class name="NLineInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="NLineInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Logically splits the set of input files for the job; each split
+    consists of N lines of the input.
+
+ @see org.apache.hadoop.mapred.FileInputFormat#getSplits(JobConf, int)]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+      <![CDATA[NLineInputFormat splits N lines of input into one split.
+
+    In many "pleasantly" parallel applications, each process/mapper
+    processes the same input file(s), but the computations are
+    controlled by different parameters (referred to as "parameter sweeps").
+    One way to achieve this is to specify a set of parameters
+    (one set per line) as input in a control file
+    (which is the input path to the map-reduce application,
+    whereas the input dataset is specified
+    via a config variable in JobConf).
+
+    NLineInputFormat can be used in such applications: it splits
+    the input file such that, by default, one line is fed as
+    the value to one map task, and the key is the offset,
+    i.e. (k, v) is (LongWritable, Text).
+    The location hints will span the whole mapred cluster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NLineInputFormat -->
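+  <!-- Illustrative sketch (not part of the generated JDiff record): a parameter-sweep
+       style setup as described above. The lines-per-split property name
+       ("mapred.line.input.format.linespermap") is recalled from the NLineInputFormat
+       source, not from the descriptor above, so treat it as an assumption.
+
+         JobConf job = new JobConf();
+         job.setInputFormat(NLineInputFormat.class);
+         // Hand N parameter sets (lines of the control file) to each map task.
+         job.setInt("mapred.line.input.format.linespermap", 1);
+         FileInputFormat.setInputPaths(job, new Path("control.txt"));
+  -->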
+ <!-- start class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <class name="NullOutputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="NullOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[Consume all outputs and put them in /dev/null.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <class name="RegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="RegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+ <class name="TokenCountMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="TokenCountMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that maps text values into <token,freq> pairs. Uses
+ {@link StringTokenizer} to break text into tokens.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.TokenCountMapper -->
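+  <!-- Illustrative sketch (not part of the generated JDiff record): the classic
+       word-count wiring built entirely from library classes in this package, with
+       TokenCountMapper emitting <token, 1> pairs and LongSumReducer (documented above)
+       summing them, both as combiner and as reducer.
+
+         JobConf job = new JobConf();
+         job.setOutputKeyClass(Text.class);
+         job.setOutputValueClass(LongWritable.class);
+         job.setMapperClass(TokenCountMapper.class);
+         job.setCombinerClass(LongSumReducer.class);
+         job.setReducerClass(LongSumReducer.class);
+  -->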
+</package>
+<package name="org.apache.hadoop.mapred.lib.aggregate">
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+ <class name="DoubleValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="DoubleValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a double value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="double"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a double value.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getSum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return an array of one element. The element is a string
+    representation of the aggregated value. The return value is
+    expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up a sequence of double
+ values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
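+  <!-- Illustrative sketch (not part of the generated JDiff record): the aggregator
+       protocol exercised standalone, outside a job, using only the methods documented
+       above; the expected results in comments follow from those docs.
+
+         DoubleValueSum sum = new DoubleValueSum();
+         sum.addNextValue(1.5);
+         sum.addNextValue("2.5");          // string form is parsed as a double
+         double total = sum.getSum();      // 4.0
+         String report = sum.getReport();  // string form of the aggregated value
+         sum.reset();                      // back to zero
+  -->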
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <class name="LongValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return an array of one element. The element is a string
+    representation of the aggregated value. The return value is
+    expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[This class implements a value aggregator that maintains the maximum of
+    a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <class name="LongValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return an array of one element. The element is a string
+    representation of the aggregated value. The return value is
+    expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[This class implements a value aggregator that maintains the minimum of
+    a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <class name="LongValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getSum" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return an array of one element. The element is a string
+    representation of the aggregated value. The return value is
+    expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <class name="StringValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return an array of one element. The element is a string
+    representation of the aggregated value. The return value is
+    expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[This class implements a value aggregator that maintains the maximum of
+    a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <class name="StringValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return an array of one element. The element is a string
+    representation of the aggregated value. The return value is
+    expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[This class implements a value aggregator that maintains the minimum of
+    a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+ <class name="UniqValueCount" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="UniqValueCount"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UniqValueCount" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Constructor
+    @param maxNum the limit on the number of unique values to keep.]]>
+ </doc>
+ </constructor>
+ <method name="setMaxItems" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <doc>
+ <![CDATA[Set the limit on the number of unique values
+ @param n the desired limit on the number of unique values
+ @return the new limit on the number of unique values]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return the number of unique objects aggregated]]>
+ </doc>
+ </method>
+ <method name="getUniqueItems" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the set of the unique objects]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return an array of the unique objects. The return value is
+    expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that dedupes a sequence of objects.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
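+  <!-- Illustrative sketch (not part of the generated JDiff record): counting distinct
+       values with UniqValueCount, using only the constructor and methods documented
+       above; the expected report in the comment follows from those docs.
+
+         UniqValueCount uniq = new UniqValueCount(1000);  // keep at most 1000 unique values
+         uniq.addNextValue("apple");
+         uniq.addNextValue("pear");
+         uniq.addNextValue("apple");
+         String count = uniq.getReport();            // "2"
+         java.util.Set items = uniq.getUniqueItems();
+  -->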
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+ <class name="UserDefinedValueAggregatorDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="UserDefinedValueAggregatorDescriptor" type="java.lang.String, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@param className the class name of the user-defined descriptor class
+    @param job a configuration object used for descriptor configuration]]>
+ </doc>
+ </constructor>
+ <method name="createInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="className" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create an instance of the given class
+ @param className the name of the class
+ @return a dynamically created instance of the given class]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pairs
+ by delegating the invocation to the real object.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+    value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[This class implements a wrapper for a user-defined value aggregator descriptor.
+    It serves two functions: one is to create an object of ValueAggregatorDescriptor from the
+    name of a user-defined class that may be dynamically loaded; the other is to
+    delegate invocations of the generateKeyValPairs function to the created object.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+ <interface name="ValueAggregator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val the value to be added]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return the string representation of the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of values as the outputs of the combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface defines the minimal protocol for value aggregators.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
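+  <!-- Illustrative sketch (not part of the generated JDiff record): a minimal custom
+       aggregator implementing the four-method protocol defined above. This one keeps a
+       running count of values; the class name is hypothetical.
+
+         import java.util.ArrayList;
+
+         public class ValueCount implements ValueAggregator {
+           private long count = 0;
+           public void addNextValue(Object val) { count++; }
+           public void reset() { count = 0; }
+           public String getReport() { return Long.toString(count); }
+           public ArrayList getCombinerOutput() {
+             // One element: the string form of the aggregated value.
+             ArrayList list = new ArrayList();
+             list.add(Long.toString(count));
+             return list;
+           }
+         }
+  -->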
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+ <class name="ValueAggregatorBaseDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="ValueAggregatorBaseDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="generateEntry" return="java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <param name="id" type="java.lang.String"/>
+ <param name="val" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @param id the aggregation id
+ @param val the val associated with the id to be aggregated
+ @return an Entry whose key is the aggregation id prefixed with
+ the aggregation type.]]>
+ </doc>
+ </method>
+ <method name="generateValueAggregator" return="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @return a value aggregator of the given type.]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate 1 or 2 aggregation-id/value pairs for the given key/value pair.
+ The first id will be of type LONG_VALUE_SUM, with "record_count" as
+ its aggregation id. If the input is a file split,
+ the second id of the same type will be generated too, with the file name
+ as its aggregation id. This achieves the behavior of counting the total number
+ of records in the input data, and the number of records in each input file.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+    value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+      <![CDATA[Get the input file name.
+
+ @param job a job configuration object]]>
+ </doc>
+ </method>
+ <field name="UNIQ_VALUE_COUNT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VALUE_HISTOGRAM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputFile" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements the common functionalities of
+ the subclasses of ValueAggregatorDescriptor class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <class name="ValueAggregatorCombiner" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorCombiner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+      <![CDATA[The combiner does not need to be configured.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Combines values for a given key.
+ @param key the key is expected to be a Text object, whose prefix indicates
+ the type of aggregation to aggregate the values.
+ @param values the values to combine
+ @param output to collect combined values]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic combiner of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+ <interface name="ValueAggregatorDescriptor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pair.
+ This function is usually called by the mapper of an Aggregate based job.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+    value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configure the object
+
+ @param job
+ a JobConf object that may contain the information that can be used
+ to configure the object.]]>
+ </doc>
+ </method>
+ <field name="TYPE_SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ONE" type="org.apache.hadoop.io.Text"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This interface defines the contract a value aggregator descriptor must
+ support. Such a descriptor can be configured with a JobConf object. Its main
+ function is to generate a list of aggregation-id/value pairs. An aggregation
+ id encodes an aggregation type which is used to guide the way to aggregate
+    the value in the reduce/combiner phase of an Aggregate based job. The mapper in
+ an Aggregate based map/reduce job may create one or more of
+ ValueAggregatorDescriptor objects at configuration time. For each input
+ key/value pair, the mapper will use those objects to create aggregation
+ id/value pairs.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
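+  <!-- Illustrative sketch (not part of the generated JDiff record): a plugin descriptor
+       of the kind described above, built on ValueAggregatorBaseDescriptor.generateEntry
+       with the LONG_VALUE_SUM type documented earlier. It counts records per first token
+       of the value; the class name and the "token:" id prefix are hypothetical, and ONE
+       is the Text constant defined on the interface above.
+
+         import java.util.ArrayList;
+         import java.util.Map.Entry;
+         import org.apache.hadoop.io.Text;
+
+         public class TokenCountDescriptor extends ValueAggregatorBaseDescriptor {
+           public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
+             ArrayList<Entry<Text, Text>> pairs = new ArrayList<Entry<Text, Text>>();
+             String first = val.toString().split("\t")[0];
+             // Aggregation id "token:<first>" with type LONG_VALUE_SUM: the framework
+             // will sum the ONE values emitted for each id.
+             pairs.add(generateEntry(LONG_VALUE_SUM, "token:" + first, ONE));
+             return pairs;
+           }
+         }
+  -->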
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+ <class name="ValueAggregatorJob" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorJob"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation. Generic hadoop
+ arguments are accepted.
+ @return a JobConf object ready for submission.
+
+ @throws IOException
+ @see GenericOptionsParser]]>
+ </doc>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setAggregatorDescriptors"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and run an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the main class for creating a map/reduce job using the Aggregate
+ framework. Aggregate is a specialization of the map/reduce framework for
+ performing various simple aggregations.
+
+ Generally speaking, in order to implement an application using the Map/Reduce
+ model, the developer has to implement Map and Reduce functions (and possibly
+ a Combine function). However, a lot of applications related to counting and
+ statistics computation have very similar characteristics. Aggregate abstracts
+ out the general patterns of these functions and implements those patterns.
+ In particular, the package provides generic mapper/reducer/combiner classes,
+ a set of built-in value aggregators, and a generic utility class that
+ helps users create map/reduce jobs using the generic classes. The built-in
+ aggregators include:
+
+ sum over numeric values; count of distinct values; histogram of values;
+ minimum, maximum, median, average, and standard deviation of numeric values.
+
+ The developer using Aggregate will need only to provide a plugin class
+ conforming to the following interface:
+
+ public interface ValueAggregatorDescriptor {
+   public ArrayList<Entry> generateKeyValPairs(Object key, Object value);
+   public void configure(JobConf job);
+ }
+
+ The package also provides a base class, ValueAggregatorBaseDescriptor,
+ implementing the above interface. The user can extend the base class and
+ implement generateKeyValPairs accordingly.
+
+ The primary work of generateKeyValPairs is to emit one or more key/value
+ pairs based on the input key/value pair. The key in an output key/value pair
+ encodes two pieces of information: aggregation type and aggregation id. The
+ value will be aggregated onto the aggregation id according to the aggregation
+ type.
+
+ This class offers a function to generate a map/reduce job using the Aggregate
+ framework. The function takes the following parameters: input directory spec,
+ input format (text or sequence file), output directory, and a file specifying
+ the user plugin class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <class name="ValueAggregatorJobBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K1, V1, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="ValueAggregatorJobBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="logSpec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="aggregatorDescriptorList" type="java.util.ArrayList&lt;org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This abstract class implements some common functionality of the
+ generic mapper, reducer and combiner classes of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <class name="ValueAggregatorMapper" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The map function. It iterates through the value aggregator descriptor
+ list to generate aggregation id/value pairs and emit them.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="org.apache.hadoop.io.Text"/>
+ <param name="arg1" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic mapper of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <class name="ValueAggregatorReducer" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param key
+ the key is expected to be a Text object, whose prefix indicates
+ the type of aggregation to aggregate the values. In effect,
+ data-driven computing is achieved. It is assumed that each
+ aggregator's getReport method emits appropriate output for the
+ aggregator. This may be further customized.
+ @param values the values to be aggregated]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic reducer of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+ <class name="ValueHistogram" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="ValueHistogram"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Add the given val to the aggregator.
+
+ @param val the value to be added. It is expected to be a string
+ in the form of xxxx\tnum, meaning xxxx has num occurrences.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this aggregator.
+ It includes the following basic statistics of the histogram:
+ the number of unique values
+ the minimum value
+ the median value
+ the maximum value
+ the average value
+ the standard deviation]]>
+ </doc>
+ </method>
+ <method name="getReportDetails" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a string representation of the list of value/frequency pairs of
+ the histogram]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a list of value/frequency pairs.
+ The return value is expected to be used by the reducer.]]>
+ </doc>
+ </method>
+ <method name="getReportItems" return="java.util.TreeMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a TreeMap representation of the histogram]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the aggregator]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that computes the
+ histogram of a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+</package>
+<package name="org.apache.hadoop.mapred.pipes">
+ <!-- start class org.apache.hadoop.mapred.pipes.Submitter -->
+ <class name="Submitter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Submitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExecutable" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the URI of the application's executable.
+ @param conf the job's configuration
+ @return the URI where the application's executable is located]]>
+ </doc>
+ </method>
+ <method name="setExecutable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="executable" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the URI for the application's executable. Normally this is an
+ hdfs: location.
+ @param conf the job's configuration
+ @param executable The URI of the application's executable.]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job is using a Java RecordReader.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordReader" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java RecordReader.
+ @param conf the configuration to check
+ @return is it a Java RecordReader?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Mapper is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaMapper" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Mapper.
+ @param conf the configuration to check
+ @return is it a Java Mapper?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaReducer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Reducer is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaReducer" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Reducer.
+ @param conf the configuration to check
+ @return is it a Java Reducer?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job will use a Java RecordWriter.
+ @param conf the configuration to modify
+ @param value the new value to set]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordWriter" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Will the reduce use a Java RecordWriter?
+ @param conf the configuration to check
+ @return true, if the output of the job will be written by Java]]>
+ </doc>
+ </method>
+ <method name="getKeepCommandFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Does the user want to keep the command file for debugging? If this is
+ true, pipes will write a copy of the command data to a file in the
+ task directory named "downlink.data", which may be used to run the C++
+ program under the debugger. You probably also want to set
+ JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
+ being deleted.
+ To run using the data file, set the environment variable
+ "hadoop.pipes.command.file" to point to the file.
+ @param conf the configuration to check
+ @return will the framework save the command file?]]>
+ </doc>
+ </method>
+ <method name="setKeepCommandFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether to keep the command file for debugging
+ @param conf the configuration to modify
+ @param keep the new value]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Submit a pipes job based on the command line arguments.
+ @param args the command line arguments]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The main entry point and job submitter. It may be used either as a
+ command-line tool or as an API to launch Pipes jobs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.pipes.Submitter -->
+</package>
+<package name="org.apache.hadoop.metrics">
+ <!-- start class org.apache.hadoop.metrics.ContextFactory -->
+ <class name="ContextFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ContextFactory"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of ContextFactory]]>
+ </doc>
+ </constructor>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the named attribute, or null if there is no
+ attribute of that name.
+
+ @param attributeName the attribute name
+ @return the attribute value]]>
+ </doc>
+ </method>
+ <method name="getAttributeNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all the factory's attributes.
+
+ @return the attribute names]]>
+ </doc>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Sets the named factory attribute to the specified value, creating it
+ if it did not already exist. If the value is null, this is the same as
+ calling removeAttribute.
+
+ @param attributeName the attribute name
+ @param value the new attribute value]]>
+ </doc>
+ </method>
+ <method name="removeAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes the named attribute if it exists.
+
+ @param attributeName the attribute name]]>
+ </doc>
+ </method>
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <exception name="InstantiationException" type="java.lang.InstantiationException"/>
+ <exception name="IllegalAccessException" type="java.lang.IllegalAccessException"/>
+ <doc>
+ <![CDATA[Returns the named MetricsContext instance, constructing it if necessary
+ using the factory's current configuration attributes. <p/>
+
+ When constructing the instance, if the factory property
+ <code><i>contextName</i>.class</code> exists,
+ its value is taken to be the name of the class to instantiate. Otherwise,
+ the default is to create an instance of
+ <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a
+ dummy "no-op" context which will cause all metric data to be discarded.
+
+ @param contextName the name of the context
+ @return the named MetricsContext]]>
+ </doc>
+ </method>
+ <method name="getNullContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a "null" context - one which does nothing.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the singleton ContextFactory instance, constructing it if
+ necessary. <p/>
+
+ When the instance is constructed, this method checks if the file
+ <code>hadoop-metrics.properties</code> exists on the class path. If it
+ exists, it must be in the format defined by java.util.Properties, and all
+ the properties in the file are set as attributes on the newly created
+ ContextFactory instance.
+
+ @return the singleton ContextFactory instance]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factory class for creating MetricsContext objects. To obtain an instance
+ of this class, use the static <code>getFactory()</code> method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ContextFactory -->
+ <!-- start interface org.apache.hadoop.metrics.MetricsContext -->
+ <interface name="MetricsContext" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.
+
+ @return the context name]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, the emitting of metrics records as they are
+ updated.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free any data that the implementation
+ may have buffered for sending at the next timer event. It
+ is OK to call <code>startMonitoring()</code> again after calling
+ this.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and also frees any buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new MetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at regular time intervals, as
+ determined by the implementation-class specific configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records and then return]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_PERIOD" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default period in seconds at which data is sent to the metrics system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The main interface to the metrics package.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsContext -->
+ <!-- start class org.apache.hadoop.metrics.MetricsException -->
+ <class name="MetricsException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException
+
+ @param message an error message]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[General-purpose, unchecked metrics exception.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsException -->
+ <!-- start interface org.apache.hadoop.metrics.MetricsRecord -->
+ <interface name="MetricsRecord" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value. The tagValue may be null,
+ which is treated the same as an empty String.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.
+
+ @param tagName name of a tag]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes, from the buffered data table, all rows having tags
+ that equal the tags that have been set on this record. For example,
+ if there are no tags on this record, all rows for this record name
+ would be removed. Or, if there is a single tag on this record, then
+ just rows containing a tag with the same name and value would be removed.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A named and optionally tagged set of records to be sent to the metrics
+ system. <p/>
+
+ A record name identifies the kind of data to be reported. For example, a
+ program reporting statistics relating to the disks on a computer might use
+ a record name "diskStats".<p/>
+
+ A record has zero or more <i>tags</i>. A tag has a name and a value. To
+ continue the example, the "diskStats" record might use a tag named
+ "diskName" to identify a particular disk. Sometimes it is useful to have
+ more than one tag, so there might also be a "diskType" with value "ide" or
+ "scsi" or whatever.<p/>
+
+ A record also has zero or more <i>metrics</i>. These are the named
+ values that are to be reported to the metrics system. In the "diskStats"
+ example, possible metric names would be "diskPercentFull", "diskPercentBusy",
+ "kbReadPerSecond", etc.<p/>
+
+ The general procedure for using a MetricsRecord is to fill in its tag and
+ metric values, and then call <code>update()</code> to pass the record to the
+ client library.
+ Metric data is not immediately sent to the metrics system
+ each time that <code>update()</code> is called.
+ An internal table is maintained, identified by the record name. This
+ table has columns
+ corresponding to the tag and the metric names, and rows
+ corresponding to each unique set of tag values. An update
+ either modifies an existing row in the table, or adds a new row with a set of
+ tag values that are different from all the other rows. Note that if there
+ are no tags, then there can be at most one row in the table. <p/>
+
+ Once a row is added to the table, its data will be sent to the metrics system
+ on every timer period, whether or not it has been updated since the previous
+ timer period. If this is inappropriate, for example if metrics were being
+ reported by some transient object in an application, the <code>remove()</code>
+ method can be used to remove the row and thus stop the data from being
+ sent.<p/>
+
+ Note that the <code>update()</code> method is atomic. This means that it is
+ safe for different threads to be updating the same metric. More precisely,
+ it is OK for different threads to call <code>update()</code> on MetricsRecord instances
+ with the same set of tag names and tag values. Different threads should
+ <b>not</b> use the same MetricsRecord instance at the same time.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsRecord -->
+ <!-- start class org.apache.hadoop.metrics.MetricsUtil -->
+ <class name="MetricsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to return the named context.
+ If the desired context cannot be created for any reason, the exception
+ is logged, and a null context is returned.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to create and return a new metrics record instance within the
+ given context. This record is tagged with the host name.
+
+ @param context the context
+ @param recordName name of the record
+ @return newly created metrics record]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility class to simplify creation and reporting of Hadoop metrics.
+
+ For examples of usage, see {@link org.apache.hadoop.dfs.DataNode}.
+ @see org.apache.hadoop.metrics.MetricsRecord
+ @see org.apache.hadoop.metrics.MetricsContext
+ @see org.apache.hadoop.metrics.ContextFactory]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsUtil -->
+ <!-- start interface org.apache.hadoop.metrics.Updater -->
+ <interface name="Updater" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Timer-based call-back from the metric library.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Call-back interface. See <code>MetricsContext.registerUpdater()</code>.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.Updater -->
+</package>
+<package name="org.apache.hadoop.metrics.file">
+ <!-- start class org.apache.hadoop.metrics.file.FileContext -->
+ <class name="FileContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of FileContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="getFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the configured file name, or null.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, by opening, in append mode, the
+ file specified by the <code>fileName</code> attribute,
+ if specified. Otherwise the data will be written to standard
+ output.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring, closing the file.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Emits a metrics record to a file.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Flushes the output writer, forcing updates to disk.]]>
+ </doc>
+ </method>
+ <field name="FILE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="PERIOD_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Metrics context for writing metrics to a file.<p/>
+
+ This class is configured by setting ContextFactory attributes which in turn
+ are usually configured through a properties file. All the attributes are
+ prefixed by the contextName. For example, the properties file might contain:
+ <pre>
+ myContextName.fileName=/tmp/metrics.log
+ myContextName.period=5
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.file.FileContext -->
+</package>
+<package name="org.apache.hadoop.metrics.ganglia">
+ <!-- start class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+ <class name="GangliaContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GangliaContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of GangliaContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Context for sending metrics to Ganglia.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+</package>
+<package name="org.apache.hadoop.metrics.jvm">
+ <!-- start class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <class name="EventCounter" extends="org.apache.log4j.AppenderSkeleton"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="EventCounter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getFatal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getError" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWarn" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfo" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="requiresLayout" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A log4j Appender that simply counts logging events at four levels:
+ fatal, error, warn and info.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <!-- start class org.apache.hadoop.metrics.jvm.JvmMetrics -->
+ <class name="JvmMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="init" return="org.apache.hadoop.metrics.jvm.JvmMetrics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="processName" type="java.lang.String"/>
+ <param name="sessionId" type="java.lang.String"/>
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[This will be called periodically (with the period being configuration
+ dependent).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Singleton class which reports Java Virtual Machine metrics to the metrics API.
+ Any application can create an instance of this class in order to emit
+ Java VM metrics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.JvmMetrics -->
+</package>
+<package name="org.apache.hadoop.metrics.spi">
+ <!-- start class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
+ <class name="AbstractMetricsContext" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsContext"/>
+ <constructor name="AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of AbstractMetricsContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ <doc>
+ <![CDATA[Initializes the context.]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for subclasses to access factory attributes.]]>
+ </doc>
+ </method>
+ <method name="getAttributeTable" return="java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="tableName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an attribute-value map derived from the factory attributes
+ by finding all factory attributes that begin with
+ <i>contextName</i>.<i>tableName</i>. The returned map consists of
+ those attributes with the contextName and tableName stripped off.]]>
+ </doc>
+ </method>
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.]]>
+ </doc>
+ </method>
+ <method name="getContextFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the factory by which this context was created.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, the emitting of metrics records.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free buffered data.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and frees buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new MetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="newRecord" return="org.apache.hadoop.metrics.spi.MetricsRecordImpl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Subclasses should override this if they subclass MetricsRecordImpl.
+ @param recordName the name of the record
+ @return newly created instance of MetricsRecordImpl or subclass]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at time intervals determined by
+ the configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sends a record to the metrics system.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called once each period after all records have been emitted. This default
+ implementation does nothing; subclasses may override it in order to perform
+ some kind of flush.]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.update(). Creates or updates a row in
+ the internal table of metric data.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.remove(). Removes all matching rows in
+ the internal table of metric data. A row matches if it has the same
+ tag names and values as record, but it may also have additional
+ tags.]]>
+ </doc>
+ </method>
+ <method name="getPeriod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the timer period.]]>
+ </doc>
+ </method>
+ <method name="setPeriod"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="period" type="int"/>
+ <doc>
+ <![CDATA[Sets the timer period]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The main class of the Service Provider Interface. This class should be
+ extended in order to integrate the Metrics API with a specific metrics
+ client library. <p/>
+
+ This class implements the internal table of metric data, and the timer
+ on which data is to be sent to the metrics system. Subclasses must
+ override the abstract <code>emitRecord</code> method in order to transmit
+ the data. <p/>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
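+ <!-- Illustrative sketch: a minimal AbstractMetricsContext subclass, based only on
+      the signatures documented above. The class name ConsoleContext is hypothetical;
+      a real implementation would transmit the row to a metrics client library.
+
+      import java.io.IOException;
+      import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
+      import org.apache.hadoop.metrics.spi.OutputRecord;
+
+      public class ConsoleContext extends AbstractMetricsContext {
+        public ConsoleContext() {
+        }
+
+        // Called on the timer thread for each buffered row.
+        protected void emitRecord(String contextName, String recordName,
+                                  OutputRecord outRec) throws IOException {
+          System.out.println(contextName + "." + recordName + ": "
+                             + outRec.getMetricNames());
+        }
+      }
+ -->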
+ <!-- start class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
+ <class name="MetricsRecordImpl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsRecord"/>
+ <constructor name="MetricsRecordImpl" type="java.lang.String, org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsRecordImpl]]>
+ </doc>
+ </constructor>
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes the row, if it exists, in the buffered data table having tags
+ that equal the tags that have been set on this record.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of MetricsRecord. Keeps a back-pointer to the context
+ from which it was created, and delegates back to it on <code>update</code>
+ and <code>remove()</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
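+ <!-- Illustrative sketch: creating and updating a record through a context, using
+      only methods documented in this file. NullContext is used purely so the
+      snippet is self-contained; the record, tag and metric names are hypothetical,
+      and ContextFactory.getFactory() is assumed to be the usual way to obtain the
+      factory.
+
+      import org.apache.hadoop.metrics.ContextFactory;
+      import org.apache.hadoop.metrics.MetricsRecord;
+      import org.apache.hadoop.metrics.spi.NullContext;
+
+      public class RecordSketch {
+        public static void main(String[] args) throws Exception {
+          NullContext context = new NullContext();
+          context.init("example", ContextFactory.getFactory());
+          MetricsRecord rec = context.createRecord("jobtracker");
+          rec.setTag("sessionId", "session0");   // tags identify the row
+          rec.setMetric("maps_launched", 12);    // absolute value
+          rec.incrMetric("jobs_submitted", 1);   // incremental value
+          rec.update();                          // buffers the row; NullContext never emits
+        }
+      }
+ -->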
+ <!-- start class org.apache.hadoop.metrics.spi.MetricValue -->
+ <class name="MetricValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricValue" type="java.lang.Number, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricValue]]>
+ </doc>
+ </constructor>
+ <method name="isIncrement" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumber" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="ABSOLUTE" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCREMENT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Number that is either an absolute or an incremental amount.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricValue -->
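+ <!-- Illustrative fragment: the two flavours of MetricValue, per the constructor and
+      constants documented above.
+
+      import org.apache.hadoop.metrics.spi.MetricValue;
+
+      MetricValue absolute  = new MetricValue(Integer.valueOf(40), MetricValue.ABSOLUTE);
+      MetricValue increment = new MetricValue(Integer.valueOf(2),  MetricValue.INCREMENT);
+      boolean abs = absolute.isAbsolute();    // true
+      boolean inc = increment.isIncrement();  // true
+      Number  amt = increment.getNumber();    // 2
+ -->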
+ <!-- start class org.apache.hadoop.metrics.spi.NullContext -->
+ <class name="NullContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContext]]>
+ </doc>
+ </constructor>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do-nothing version of startMonitoring]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Null metrics context: a metrics context which does nothing. Used as the
+ default context, so that no performance data is emitted if no configuration
+ data is found.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContext -->
+ <!-- start class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <class name="NullContextWithUpdateThread" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContextWithUpdateThread"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContextWithUpdateThread]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A null context which has a thread that calls the registered updaters
+ periodically when monitoring is started. This keeps the data sampled
+ correctly.
+ In all other respects, this is like the null context: no data is emitted.
+ This is suitable for monitoring systems like JMX, which read the metrics
+ only when someone queries the data.
+
+ The default implementations of start and stop monitoring
+ in AbstractMetricsContext are good enough.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <!-- start class org.apache.hadoop.metrics.spi.OutputRecord -->
+ <class name="OutputRecord" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTagNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of tag names]]>
+ </doc>
+ </method>
+ <method name="getTag" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a tag object which can be a String, Integer, Short or Byte.
+
+ @return the tag value, or null if there is no such tag]]>
+ </doc>
+ </method>
+ <method name="getMetricNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of metric names.]]>
+ </doc>
+ </method>
+ <method name="getMetric" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the metric object which can be a Float, Integer, Short or Byte.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents a record of metric data to be sent to a metrics system.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.OutputRecord -->
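+ <!-- Illustrative fragment: reading an OutputRecord inside an emitRecord
+      implementation (see the sketch after AbstractMetricsContext above). The
+      variable outRec is the method parameter.
+
+      for (String tag : outRec.getTagNames()) {
+        Object tagValue = outRec.getTag(tag);     // String, Integer, Short or Byte
+      }
+      for (String name : outRec.getMetricNames()) {
+        Number metric = outRec.getMetric(name);   // Float, Integer, Short or Byte
+      }
+ -->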
+ <!-- start class org.apache.hadoop.metrics.spi.Util -->
+ <class name="Util" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="parse" return="java.util.List&lt;java.net.InetSocketAddress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="specs" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Parses a space and/or comma separated sequence of server specifications
+ of the form <i>hostname</i> or <i>hostname:port</i>. If
+ the specs string is null, defaults to localhost:defaultPort.
+
+ @return a list of InetSocketAddress objects.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Static utility methods]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.Util -->
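+ <!-- Illustrative fragment of Util.parse as documented above; the server names are
+      hypothetical.
+
+      import java.net.InetSocketAddress;
+      import java.util.List;
+      import org.apache.hadoop.metrics.spi.Util;
+
+      List<InetSocketAddress> servers =
+          Util.parse("mon1.example.com:8649, mon2.example.com", 8649);
+      // mon2.example.com gets the default port 8649
+      List<InetSocketAddress> fallback = Util.parse(null, 8649);
+      // null specs default to localhost:8649
+ -->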
+</package>
+<package name="org.apache.hadoop.metrics.util">
+ <!-- start class org.apache.hadoop.metrics.util.MBeanUtil -->
+ <class name="MBeanUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MBeanUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="registerMBean" return="javax.management.ObjectName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="serviceName" type="java.lang.String"/>
+ <param name="nameName" type="java.lang.String"/>
+ <param name="theMbean" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Register the MBean using our standard MBeanName format
+ "hadoop.dfs:service=<serviceName>,name=<nameName>",
+ where <serviceName> and <nameName> are the supplied parameters.
+
+ @param serviceName
+ @param nameName
+ @param theMbean - the MBean to register
+ @return the name used to register the MBean]]>
+ </doc>
+ </method>
+ <method name="unregisterMBean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mbeanName" type="javax.management.ObjectName"/>
+ </method>
+ <doc>
+ <![CDATA[This util class provides a method to register an MBean using
+ our standard naming convention as described in the doc
+ for {@link #registerMBean(String, String, Object)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MBeanUtil -->
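+ <!-- Illustrative fragment: registering and unregistering an MBean with the
+      documented naming convention. The service/name strings and the helper that
+      produces the MBean object are hypothetical.
+
+      import javax.management.ObjectName;
+      import org.apache.hadoop.metrics.util.MBeanUtil;
+
+      Object mbean = createDataNodeActivityMBean();  // hypothetical helper returning an MBean
+      ObjectName registered =
+          MBeanUtil.registerMBean("DataNode", "DataNodeActivity", mbean);
+      // now visible as "hadoop.dfs:service=DataNode,name=DataNodeActivity"
+      MBeanUtil.unregisterMBean(registered);  // on shutdown
+ -->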
+ <!-- start class org.apache.hadoop.metrics.util.MetricsIntValue -->
+ <class name="MetricsIntValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsIntValue" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="int"/>
+ <doc>
+ <![CDATA[Set the value
+ @param newValue]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+ <![CDATA[Increment the metric by incr value
+ @param incr - value to be added]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decr" type="int"/>
+ <doc>
+ <![CDATA[Decrement the metric by decr value
+ @param decr - value to subtract]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Dec metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the mr.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsIntValue class is for a metric that is not time-varying
+ but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsIntValue -->
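+ <!-- Illustrative fragment of MetricsIntValue; the metric name is hypothetical and
+      the variable record is assumed to be a MetricsRecord obtained elsewhere.
+
+      import org.apache.hadoop.metrics.MetricsRecord;
+      import org.apache.hadoop.metrics.util.MetricsIntValue;
+
+      MetricsIntValue pendingDeletes = new MetricsIntValue("pending_deletes");
+      pendingDeletes.set(42);
+      pendingDeletes.inc();       // now 43
+      pendingDeletes.dec(3);      // now 40
+      // in the periodic Updater callback; pushes only if changed since the last push:
+      pendingDeletes.pushMetric(record);
+ -->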
+ <!-- start class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <class name="MetricsLongValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsLongValue" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="long"/>
+ <doc>
+ <![CDATA[Set the value
+ @param newValue]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Increment the metric by incr value
+ @param incr - value to be added]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decr" type="long"/>
+ <doc>
+ <![CDATA[Decrement the metric by decr value
+ @param decr - value to subtract]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Dec metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the mr.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsLongValue class is for a metric that is not time-varying
+ but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
+ <class name="MetricsTimeVaryingInt" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingInt" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+ <![CDATA[Increment the metric by incr value
+ @param incr - number of operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalValue()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value at the previous interval
+ @return prev interval value]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingInt class is for a metric that naturally
+ varies over time (e.g. number of files created).
+ The metric is published at each interval heartbeat (the interval
+ is set in the metrics config file).
+ Note: if one wants a time associated with the metric, then use
+ @see org.apache.hadoop.metrics.util.MetricsTimeVaryingRate]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
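+ <!-- Illustrative fragment of MetricsTimeVaryingInt; the metric name is hypothetical
+      and the variable record is assumed to be a MetricsRecord obtained elsewhere.
+
+      import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
+
+      MetricsTimeVaryingInt filesCreated = new MetricsTimeVaryingInt("files_created");
+      filesCreated.inc();    // one operation
+      filesCreated.inc(4);   // four more in the same interval
+      // at the interval heartbeat an Updater pushes the delta since the last push:
+      filesCreated.pushMetric(record);
+      int lastInterval = filesCreated.getPreviousIntervalValue();
+ -->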
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
+ <class name="MetricsTimeVaryingRate" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingRate" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param n the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numOps" type="int"/>
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for numOps operations
+ @param numOps - number of operations
+ @param time - time for numOps operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for one operation
+ @param time for one operation]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and
+ {@link #getPreviousIntervalNumOps()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalNumOps" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of operations in the previous interval
+ @return - ops in prev interval]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalAverageTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The average time taken by an operation in the previous interval
+ @return - the average operation time.]]>
+ </doc>
+ </method>
+ <method name="getMinTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The min time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return min time for an operation]]>
+ </doc>
+ </method>
+ <method name="getMaxTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The max time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return max time for an operation]]>
+ </doc>
+ </method>
+ <method name="resetMinMax"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the min max values]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingRate class is for a rate-based metric that
+ naturally varies over time (e.g. time taken to create a file).
+ The rate is averaged at each interval heartbeat (the interval
+ is set in the metrics config file).
+ This class also keeps track of the min and max operation times along with
+ a method to reset the min-max.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
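+ <!-- Illustrative fragment of MetricsTimeVaryingRate: time an operation, record it,
+      and read back the previous-interval aggregates. The metric name and doCreate()
+      are hypothetical; record is assumed to be a MetricsRecord obtained elsewhere.
+
+      import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+
+      MetricsTimeVaryingRate createRate = new MetricsTimeVaryingRate("create");
+      long start = System.currentTimeMillis();
+      doCreate();  // hypothetical operation being timed
+      createRate.inc(System.currentTimeMillis() - start);
+      createRate.pushMetric(record);   // pushed by the Updater each interval
+      long avgMs = createRate.getPreviousIntervalAverageTime();
+      long maxMs = createRate.getMaxTime();  // since the last resetMinMax()
+      createRate.resetMinMax();
+ -->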
+</package>
+<package name="org.apache.hadoop.net">
+ <!-- start class org.apache.hadoop.net.DNS -->
+ <class name="DNS" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DNS"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reverseDns" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hostIp" type="java.net.InetAddress"/>
+ <param name="ns" type="java.lang.String"/>
+ <exception name="NamingException" type="javax.naming.NamingException"/>
+ <doc>
+ <![CDATA[Returns the hostname associated with the specified IP address by the
+ provided nameserver.
+
+ @param hostIp
+ The address to reverse lookup
+ @param ns
+ The host name of a reachable DNS server
+ @return The host name associated with the provided IP
+ @throws NamingException
+ If a NamingException is encountered]]>
+ </doc>
+ </method>
+ <method name="getIPs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the IPs associated with the provided interface, if any, in
+ textual form.
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return A string vector of all the IPs associated with the provided
+ interface
+ @throws UnknownHostException
+ If an UnknownHostException is encountered in querying the
+ default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultIP" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the first available IP address associated with the provided
+ network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The IP address in text form
+ @throws UnknownHostException
+ If one is encountered in querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the provided nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return A string vector of all host names associated with the IPs tied to
+ the specified interface
+ @throws UnknownHostException]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the default nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The list of host names associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the provided
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the default
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides direct and reverse lookup functionalities, allowing
+ the querying of specific network interfaces or nameservers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.DNS -->
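+ <!-- Illustrative fragment of the DNS helpers; "eth0" follows the interface example
+      in the docs above, and the nameserver and address values are hypothetical.
+      reverseDns throws javax.naming.NamingException; the others throw
+      UnknownHostException.
+
+      import java.net.InetAddress;
+      import org.apache.hadoop.net.DNS;
+
+      String[] ips  = DNS.getIPs("eth0");          // all IPs bound to eth0
+      String   ip   = DNS.getDefaultIP("eth0");    // first available IP
+      String   host = DNS.getDefaultHost("eth0");  // via the default nameserver
+      String   name = DNS.reverseDns(InetAddress.getByName("10.0.0.1"),
+                                     "ns1.example.com");
+ -->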
+ <!-- start interface org.apache.hadoop.net.DNSToSwitchMapping -->
+ <interface name="DNSToSwitchMapping" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[Resolves a list of DNS-names/IP-addresses and returns a list of
+ switch information (network paths). One-to-one correspondence must be
+ maintained between the elements in the lists.
+ Consider an element in the argument list - x.y.com. The switch information
+ that is returned must be a network path of the form /foo/rack,
+ where / is the root, and 'foo' is the switch where 'rack' is connected.
+ Note the hostname/ip-address is not part of the returned path.
+ The network topology of the cluster would determine the number of
+ components in the network path.
+ @param names
+ @return list of resolved network paths]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An interface that should be implemented to allow pluggable
+ DNS-name/IP-address to RackID resolvers.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.DNSToSwitchMapping -->
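+ <!-- Illustrative sketch: a trivial DNSToSwitchMapping that puts every host on one
+      rack while keeping the required one-to-one correspondence. The class name and
+      the /default-switch/default-rack path are hypothetical.
+
+      import java.util.ArrayList;
+      import java.util.List;
+      import org.apache.hadoop.net.DNSToSwitchMapping;
+
+      public class SingleRackMapping implements DNSToSwitchMapping {
+        public List<String> resolve(List<String> names) {
+          List<String> paths = new ArrayList<String>(names.size());
+          for (int i = 0; i < names.size(); i++) {
+            paths.add("/default-switch/default-rack");  // one path per input name
+          }
+          return paths;
+        }
+      }
+ -->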
+ <!-- start class org.apache.hadoop.net.NetUtils -->
+ <class name="NetUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="clazz" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the socket factory for the given class according to its
+ configuration parameter
+ <tt>hadoop.rpc.socket.factory.class.&lt;ClassName&gt;</tt>. When no
+ such parameter exists then fall back on the default socket factory as
+ configured by <tt>hadoop.rpc.socket.factory.class.default</tt>. If
+ this default socket factory is not configured, then fall back on the JVM
+ default socket factory.
+
+ @param conf the configuration
+ @param clazz the class (usually a {@link VersionedProtocol})
+ @return a socket factory]]>
+ </doc>
+ </method>
+ <method name="getDefaultSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default socket factory as specified by the configuration
+ parameter <tt>hadoop.rpc.socket.factory.default</tt>
+
+ @param conf the configuration
+ @return the default socket factory as specified in the configuration or
+ the JVM default socket factory if the configuration does not
+ contain a default socket factory property.]]>
+ </doc>
+ </method>
+ <method name="getSocketFactoryFromProperty" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="propValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the socket factory corresponding to the given proxy URI. If the
+ given proxy URI corresponds to an absence of configuration parameter,
+ returns null. If the URI is malformed, an exception is raised.
+
+ @param propValue the property which is the class name of the
+ SocketFactory to instantiate; assumed non null and non empty.
+ @return a socket factory as defined in the property value.]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="getServerAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="oldBindAddressName" type="java.lang.String"/>
+ <param name="oldPortName" type="java.lang.String"/>
+ <param name="newBindAddressName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Handle the transition from pairs of attributes specifying a host and port
+ to a single colon separated one.
+ @param conf the configuration to check
+ @param oldBindAddressName the old address attribute name
+ @param oldPortName the old port attribute name
+ @param newBindAddressName the new combined name
+ @return the complete address from the configuration]]>
+ </doc>
+ </method>
+ <method name="addStaticResolution"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="resolvedName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a static resolution for host. This can be used to set up
+ fake hostnames that point to a well known host. For example,
+ in some testcases we need to have daemons with different hostnames
+ running on the same machine. In order to create connections to these
+ daemons, one can set up mappings from those hostnames to "localhost".
+ {@link NetUtils#getStaticResolution(String)} can be used to query for
+ the actual hostname.
+ @param host
+ @param resolvedName]]>
+ </doc>
+ </method>
+ <method name="getStaticResolution" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Retrieves the resolved name for the passed host. The resolved name must
+ have been set earlier using
+ {@link NetUtils#addStaticResolution(String, String)}
+ @param host
+ @return the resolution]]>
+ </doc>
+ </method>
+ <method name="getAllStaticResolutions" return="java.util.List&lt;java.lang.String[]&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is used to get all the resolutions that were added using
+ {@link NetUtils#addStaticResolution(String, String)}. The return
+ value is a List, each element of which is a String array
+ of the form String[0]=hostname, String[1]=resolved-hostname.
+ @return the list of resolutions]]>
+ </doc>
+ </method>
+ <method name="getConnectAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="server" type="org.apache.hadoop.ipc.Server"/>
+ <doc>
+ <![CDATA[Returns InetSocketAddress that a client can use to
+ connect to the server. Server.getListenerAddress() is not correct when
+ the server binds to "0.0.0.0". This returns "127.0.0.1:port" when
+ the getListenerAddress() returns "0.0.0.0:port".
+
+ @param server
+ @return socket address that a client can use to connect to the server.]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getInputStream(socket, socket.getSoTimeout()).<br><br>
+
+ From documentation for {@link #getInputStream(Socket, long)}:<br>
+ Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils},
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see #getInputStream(Socket, long)
+
+ @param socket
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils},
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. zero
+ for waiting as long as necessary.
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getOutputStream(socket, 0). Timeout of zero implies the write will
+ wait until the data can be written.<br><br>
+
+ From documentation for {@link #getOutputStream(Socket, long)} : <br>
+ Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ the data can be written.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils},
+ must use this interface instead of {@link Socket#getOutputStream()}.
+
+ @see #getOutputStream(Socket, long)
+
+ @param socket
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ the data can be written.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils},
+ must use this interface instead of {@link Socket#getOutputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. zero
+ for waiting as long as necessary.
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetUtils -->
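+ <!-- Illustrative fragment of the NetUtils helpers documented above; the host names
+      and the connectTo helper are hypothetical.
+
+      import java.io.InputStream;
+      import java.io.OutputStream;
+      import java.net.InetSocketAddress;
+      import java.net.Socket;
+      import org.apache.hadoop.net.NetUtils;
+
+      InetSocketAddress a = NetUtils.createSocketAddr("nn.example.com:8020");
+      InetSocketAddress b = NetUtils.createSocketAddr("nn.example.com", 8020);
+      InetSocketAddress c = NetUtils.createSocketAddr("hdfs://nn.example.com:8020/", 0);
+
+      // Streams for sockets from NetUtils socket factories must come from these
+      // wrappers rather than Socket.getInputStream()/getOutputStream():
+      Socket socket = connectTo(a);  // hypothetical helper returning a connected Socket
+      InputStream  in  = NetUtils.getInputStream(socket);         // uses the socket's SO_TIMEOUT
+      OutputStream out = NetUtils.getOutputStream(socket, 5000);  // 5 s write timeout
+ -->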
+ <!-- start class org.apache.hadoop.net.NetworkTopology -->
+ <class name="NetworkTopology" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetworkTopology"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Add a leaf node.
+ Update the node counter & rack counter if necessary.
+ @param node
+ node to be added
+ @exception IllegalArgumentException if the node is added under a leaf,
+ or the node to be added is not a leaf]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Remove a node.
+ Update the node counter & rack counter if necessary.
+ @param node
+ node to be removed]]>
+ </doc>
+ </method>
+ <method name="contains" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if the tree contains node <i>node</i>
+
+ @param node
+ a node
+ @return true if <i>node</i> is already in the tree; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="loc" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a string representation of a node, return its reference
+
+ @param loc
+ a path-like string representation of a node
+ @return a reference to the node; null if the node is not in the tree]]>
+ </doc>
+ </method>
+ <method name="getNumOfRacks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of racks]]>
+ </doc>
+ </method>
+ <method name="getNumOfLeaves" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of nodes]]>
+ </doc>
+ </method>
+ <method name="getDistance" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return the distance between two nodes.
+ It is assumed that the distance from one node to its parent is 1.
+ The distance between two nodes is calculated by summing up their distances
+ to their closest common ancestor.
+ @param node1 one node
+ @param node2 another node
+ @return the distance between node1 and node2
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="isOnSameRack" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if two nodes are on the same rack
+ @param node1 one node
+ @param node2 another node
+ @return true if node1 and node2 are on the same rack; false otherwise
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="chooseRandom" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Randomly choose one node from <i>scope</i>.
+ If scope starts with ~, choose one from all nodes except for the
+ ones in <i>scope</i>; otherwise, choose one from <i>scope</i>.
+ @param scope range of nodes from which a node will be chosen
+ @return the chosen node]]>
+ </doc>
+ </method>
+ <method name="countNumOfAvailableNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <param name="excludedNodes" type="java.util.List&lt;org.apache.hadoop.net.Node&gt;"/>
+ <doc>
+ <![CDATA[Return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i>.
+ If scope starts with ~, return the number of nodes that are in
+ neither <i>scope</i> nor <i>excludedNodes</i>.
+ @param scope a path string that may start with ~
+ @param excludedNodes a list of nodes
+ @return number of available nodes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert a network tree to a string]]>
+ </doc>
+ </method>
+ <method name="pseudoSortByDistance"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reader" type="org.apache.hadoop.net.Node"/>
+ <param name="nodes" type="org.apache.hadoop.net.Node[]"/>
+ <doc>
+ <![CDATA[Sort the nodes array by their distances to <i>reader</i>.
+ It linearly scans the array; if a local node is found, it is swapped with
+ the first element of the array.
+ If a local rack node is found, it is swapped with the first element following
+ the local node.
+ If neither a local node nor a local rack node is found, a random replica
+ location is put at position 0.
+ The rest of the nodes are left untouched.]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_RACK" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UNRESOLVED" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_HOST_LEVEL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The class represents a cluster of computers with a tree hierarchical
+ network topology.
+ For example, a cluster may consist of many data centers filled
+ with racks of computers.
+ In a network topology, leaves represent data nodes (computers) and inner
+ nodes represent switches/routers that manage traffic in/out of data centers
+ or racks.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetworkTopology -->
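+ <!-- A minimal usage sketch (Java) for the NetworkTopology class above; it is
+ not generated from source, and the host names and rack paths are
+ illustrative assumptions. It exercises add, isOnSameRack, and getDistance
+ as documented above (distance to a parent is 1).
+
+ import org.apache.hadoop.net.NetworkTopology;
+ import org.apache.hadoop.net.Node;
+ import org.apache.hadoop.net.NodeBase;
+
+ public class TopologyExample {
+   public static void main(String[] args) {
+     NetworkTopology topology = new NetworkTopology();
+     // NodeBase(name, location): leaves are data nodes under rack paths.
+     Node n1 = new NodeBase("host1:50010", "/dc1/rack1");
+     Node n2 = new NodeBase("host2:50010", "/dc1/rack1");
+     Node n3 = new NodeBase("host3:50010", "/dc1/rack2");
+     topology.add(n1);
+     topology.add(n2);
+     topology.add(n3);
+     System.out.println(topology.isOnSameRack(n1, n2)); // true: same rack
+     // 4: two hops up to the common ancestor /dc1, two hops back down.
+     System.out.println(topology.getDistance(n1, n3));
+   }
+ }
+ -->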
+ <!-- start interface org.apache.hadoop.net.Node -->
+ <interface name="Node" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the string representation of this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the node's network location]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface defines a node in a network topology.
+ A node may be a leaf representing a data node or an inner
+ node representing a datacenter or rack.
+ Each node has a name and its location in the network is
+ decided by a string with syntax similar to a file name.
+ For example, a data node's name is hostname:port# and if it's located at
+ rack "orange" in datacenter "dog", the string representation of its
+ network location is /dog/orange]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.Node -->
+ <!-- start class org.apache.hadoop.net.NodeBase -->
+ <class name="NodeBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <constructor name="NodeBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its path
+ @param path
+ a concatenation of this node's location, the path separator, and its name]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String, org.apache.hadoop.net.Node, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location
+ @param parent this node's parent node
+ @param level this node's level in the tree]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set this node's network location]]>
+ </doc>
+ </method>
+ <method name="getPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return this node's path]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's string representation]]>
+ </doc>
+ </method>
+ <method name="normalize" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Normalize a path]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree]]>
+ </doc>
+ </method>
+ <field name="PATH_SEPARATOR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PATH_SEPARATOR_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ROOT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="location" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="level" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="parent" type="org.apache.hadoop.net.Node"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class that implements interface Node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NodeBase -->
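+ <!-- A minimal usage sketch (Java) for the NodeBase class above; not generated
+ from source, and the path "/dog/orange/host1:9000" is an illustrative
+ assumption following the Node interface doc (location + separator + name).
+
+ import org.apache.hadoop.net.Node;
+ import org.apache.hadoop.net.NodeBase;
+
+ public class NodeBaseExample {
+   public static void main(String[] args) {
+     // Path form: location, then the path separator, then the name.
+     Node node = new NodeBase("/dog/orange/host1:9000");
+     System.out.println(node.getName());            // host1:9000
+     System.out.println(node.getNetworkLocation()); // /dog/orange
+     System.out.println(NodeBase.getPath(node));    // /dog/orange/host1:9000
+     // normalize a path string (assumption: strips a trailing separator)
+     System.out.println(NodeBase.normalize("/dog/orange/"));
+   }
+ }
+ -->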
+ <!-- start class org.apache.hadoop.net.ScriptBasedMapping -->
+ <class name="ScriptBasedMapping" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.net.DNSToSwitchMapping"/>
+ <constructor name="ScriptBasedMapping"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[This class implements the {@link DNSToSwitchMapping} interface using a
+ script configured via topology.script.file.name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.ScriptBasedMapping -->
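+ <!-- A minimal usage sketch (Java) for the ScriptBasedMapping class above; not
+ generated from source. The script path and addresses are assumptions: the
+ configured script is expected to map each address to a rack path.
+
+ import java.util.Arrays;
+ import java.util.List;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.net.ScriptBasedMapping;
+
+ public class RackResolveExample {
+   public static void main(String[] args) {
+     Configuration conf = new Configuration();
+     // Hypothetical site-specific topology script.
+     conf.set("topology.script.file.name", "/etc/hadoop/rack-script.sh");
+     ScriptBasedMapping mapping = new ScriptBasedMapping();
+     mapping.setConf(conf);
+     List<String> racks = mapping.resolve(Arrays.asList("10.0.0.1", "10.0.0.2"));
+     System.out.println(racks); // e.g. [/dc1/rack1, /dc1/rack2]
+   }
+ }
+ -->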
+ <!-- start class org.apache.hadoop.net.SocketInputStream -->
+ <class name="SocketInputStream" extends="java.io.InputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.ReadableByteChannel"/>
+ <constructor name="SocketInputStream" type="java.nio.channels.ReadableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for reading, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), timeout): <br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), socket.getSoTimeout()):<br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.ReadableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this input stream.
+ This is useful in certain cases, such as obtaining a channel for
+ {@link FileChannel#transferFrom(ReadableByteChannel, long, long)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForReadable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for reading.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an input stream that can have a timeout while reading.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} for the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketOutputStream} for writing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketInputStream -->
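+ <!-- A minimal usage sketch (Java) for the SocketInputStream class above; not
+ generated from source, and the address, port, and timeout are illustrative
+ assumptions.
+
+ import java.net.InetSocketAddress;
+ import java.net.Socket;
+ import java.nio.channels.SocketChannel;
+ import org.apache.hadoop.net.SocketInputStream;
+
+ public class TimedReadExample {
+   public static void main(String[] args) throws Exception {
+     // The socket must have an associated channel, per the constructor docs.
+     Socket socket = SocketChannel.open(
+         new InetSocketAddress("localhost", 9000)).socket();
+     // A read now fails with SocketTimeoutException after 5 s of inactivity
+     // instead of blocking forever.
+     SocketInputStream in = new SocketInputStream(socket, 5000);
+     byte[] buf = new byte[4096];
+     int n = in.read(buf, 0, buf.length);
+     System.out.println("read " + n + " bytes");
+     in.close();
+   }
+ }
+ -->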
+ <!-- start class org.apache.hadoop.net.SocketOutputStream -->
+ <class name="SocketOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.WritableByteChannel"/>
+ <constructor name="SocketOutputStream" type="java.nio.channels.WritableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for writing, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketOutputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketOutputStream(socket.getChannel(), timeout):<br><br>
+
+ Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketOutputStream#SocketOutputStream(WritableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.WritableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this stream.
+ This is useful in certain cases, such as obtaining a channel for
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for writing.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="transferToFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileCh" type="java.nio.channels.FileChannel"/>
+ <param name="position" type="long"/>
+ <param name="count" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Transfers data from FileChannel using
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.
+
+ Similar to readFully(), this waits until the requested amount of
+ data is transferred.
+
+ @param fileCh FileChannel to transfer data from.
+ @param position position within the channel where the transfer begins
+ @param count number of bytes to transfer.
+
+ @throws EOFException
+ If the end of the input file is reached before the requested number of
+ bytes are transferred.
+
+ @throws SocketTimeoutException
+ If this channel blocks the transfer longer than the timeout for
+ this stream.
+
+ @throws IOException Includes any exception thrown by
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an output stream that can have a timeout while writing.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} on the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketInputStream} for reading.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketOutputStream -->
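+ <!-- A minimal usage sketch (Java) for transferToFully above; not generated
+ from source, and the file name, address, and timeout are illustrative
+ assumptions.
+
+ import java.io.FileInputStream;
+ import java.net.InetSocketAddress;
+ import java.net.Socket;
+ import java.nio.channels.FileChannel;
+ import java.nio.channels.SocketChannel;
+ import org.apache.hadoop.net.SocketOutputStream;
+
+ public class TransferExample {
+   public static void main(String[] args) throws Exception {
+     FileChannel fileCh = new FileInputStream("data.bin").getChannel();
+     Socket socket = SocketChannel.open(
+         new InetSocketAddress("localhost", 9000)).socket();
+     SocketOutputStream out = new SocketOutputStream(socket, 5000);
+     // Blocks until the whole range is sent; each wait on the channel is
+     // bounded by the 5 s timeout given above. Note count is an int.
+     out.transferToFully(fileCh, 0, (int) fileCh.size());
+     out.close();
+   }
+ }
+ -->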
+ <!-- start class org.apache.hadoop.net.SocksSocketFactory -->
+ <class name="SocksSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="SocksSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <constructor name="SocksSocketFactory" type="java.net.Proxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with a supplied Proxy
+
+ @param proxy the proxy to use to create sockets]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create sockets with a SOCKS proxy]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocksSocketFactory -->
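+ <!-- A minimal usage sketch (Java) for the SocksSocketFactory class above; not
+ generated from source, and the proxy endpoint and target host are
+ illustrative assumptions.
+
+ import java.net.InetSocketAddress;
+ import java.net.Proxy;
+ import java.net.Socket;
+ import org.apache.hadoop.net.SocksSocketFactory;
+
+ public class SocksExample {
+   public static void main(String[] args) throws Exception {
+     // Hypothetical SOCKS proxy endpoint.
+     Proxy proxy = new Proxy(Proxy.Type.SOCKS,
+         new InetSocketAddress("socks.example.com", 1080));
+     SocksSocketFactory factory = new SocksSocketFactory(proxy);
+     // Connections made through this factory traverse the proxy.
+     Socket socket = factory.createSocket("namenode.example.com", 8020);
+     socket.close();
+   }
+ }
+ -->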
+ <!-- start class org.apache.hadoop.net.StandardSocketFactory -->
+ <class name="StandardSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StandardSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create standard sockets, without a proxy]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.StandardSocketFactory -->
+</package>
+<package name="org.apache.hadoop.record">
+ <!-- start class org.apache.hadoop.record.BinaryRecordInput -->
+ <class name="BinaryRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="BinaryRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordInput" type="java.io.DataInput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inp" type="java.io.DataInput"/>
+ <doc>
+ <![CDATA[Get a thread-local record input for the supplied DataInput.
+ @param inp data input stream
+ @return binary record input corresponding to the supplied DataInput.]]>
+ </doc>
+ </method>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordInput -->
+ <!-- start class org.apache.hadoop.record.BinaryRecordOutput -->
+ <class name="BinaryRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="BinaryRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordOutput" type="java.io.DataOutput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <doc>
+ <![CDATA[Get a thread-local record output for the supplied DataOutput.
+ @param out data output stream
+ @return binary record output corresponding to the supplied DataOutput.]]>
+ </doc>
+ </method>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordOutput -->
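+ <!-- A minimal round-trip sketch (Java) for the BinaryRecordOutput and
+ BinaryRecordInput classes above; not generated from source. It uses the
+ thread-local get() factories documented above with in-memory streams.
+
+ import java.io.ByteArrayInputStream;
+ import java.io.ByteArrayOutputStream;
+ import java.io.DataInputStream;
+ import java.io.DataOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.record.BinaryRecordInput;
+ import org.apache.hadoop.record.BinaryRecordOutput;
+
+ public class RecordRoundTrip {
+   public static void main(String[] args) throws IOException {
+     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+     BinaryRecordOutput out = BinaryRecordOutput.get(new DataOutputStream(bytes));
+     out.writeInt(42, "id");           // tags are unused in the binary encoding
+     out.writeString("spark", "name");
+
+     BinaryRecordInput in = BinaryRecordInput.get(
+         new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
+     System.out.println(in.readInt("id"));      // 42
+     System.out.println(in.readString("name")); // spark
+   }
+ }
+ -->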
+ <!-- start class org.apache.hadoop.record.Buffer -->
+ <class name="Buffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Buffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-count sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte array as the initial value.
+
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[], int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte range as the initial value.
+
+ @param bytes A copy of this array becomes the backing storage for the object.
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Use the specified byte array as the underlying sequence.
+
+ @param bytes byte sequence]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Copy the specified byte array to the Buffer. Replaces the current buffer.
+
+ @param bytes byte array to be assigned
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the Buffer.
+
+ @return the backing byte array; its contents are only valid between
+ index 0 and getCount() - 1.]]>
+ </doc>
+ </method>
+ <method name="getCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current count of the buffer.]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum count that can be handled without
+ resizing the backing storage.
+
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newCapacity" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved if newCapacity >= getCount().
+ @param newCapacity The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the buffer to size 0]]>
+ </doc>
+ </method>
+ <method name="truncate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Change the capacity of the backing store to be the same as the current
+ count of buffer.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer.
+
+ @param bytes byte array to be appended
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer
+
+ @param bytes byte array to be appended]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Define the sort order of the Buffer.
+
+ @param other The other buffer
+ @return Positive if this is bigger than other, 0 if they are equal, and
+ negative if this is smaller than other.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="charsetName" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ <doc>
+ <![CDATA[Convert the byte buffer to a string using a specific character encoding.
+
+ @param charsetName Valid Java Character Set Name]]>
+ </doc>
+ </method>
+ <method name="clone" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="CloneNotSupportedException" type="java.lang.CloneNotSupportedException"/>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is used as a Java native type for buffer.
+ It is resizable and distinguishes between the count of the sequence and
+ the current capacity.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Buffer -->
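+ <!-- A minimal usage sketch (Java) for the Buffer class above; not generated
+ from source. It illustrates the count/capacity distinction documented
+ above; the payload strings are illustrative assumptions.
+
+ import org.apache.hadoop.record.Buffer;
+
+ public class BufferExample {
+   public static void main(String[] args) throws Exception {
+     Buffer buffer = new Buffer("hello".getBytes("UTF-8"));
+     buffer.append(" world".getBytes("UTF-8"));
+     System.out.println(buffer.getCount());    // 11: bytes of valid data
+     System.out.println(buffer.getCapacity()); // at least 11; may be larger
+     buffer.truncate();                        // shrink capacity to the count
+     System.out.println(buffer.toString("UTF-8")); // hello world
+   }
+ }
+ -->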
+ <!-- start class org.apache.hadoop.record.CsvRecordInput -->
+ <class name="CsvRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="CsvRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordInput -->
+ <!-- start class org.apache.hadoop.record.CsvRecordOutput -->
+ <class name="CsvRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="CsvRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordOutput -->
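+  <!-- A minimal usage sketch for the CSV serializer; the field tags and the
+       use of System.out are illustrative assumptions. Per the RecordOutput
+       docs, tags matter only to tagged formats such as XML.
+
+       import java.io.IOException;
+       import org.apache.hadoop.record.CsvRecordOutput;
+
+       public class CsvWriteDemo {
+         public static void main(String[] args) throws IOException {
+           CsvRecordOutput rout = new CsvRecordOutput(System.out);
+           rout.writeInt(42, "count");
+           rout.writeString("hello", "greeting");
+           rout.writeBool(true, "flag");
+         }
+       }
+  -->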
+ <!-- start interface org.apache.hadoop.record.Index -->
+ <interface name="Index" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="done" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="incr"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Interface that acts as an iterator for deserializing vectors and maps.
+ The deserializer returns an instance that the record uses to
+ read vectors and maps. An example of usage is as follows:
+
+ <code>
+ Index idx = startVector(...);
+ while (!idx.done()) {
+ .... // read element of a vector
+ idx.incr();
+ }
+ </code>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.Index -->
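+  <!-- Expanding the example above into a fuller sketch: reading a vector of
+       strings with the Index iterator. The RecordInput instance rin and the
+       tag "items" are assumptions.
+
+       Index idx = rin.startVector("items");
+       java.util.ArrayList<String> items = new java.util.ArrayList<String>();
+       while (!idx.done()) {
+         items.add(rin.readString("items"));
+         idx.incr();
+       }
+       rin.endVector("items");
+  -->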
+ <!-- start class org.apache.hadoop.record.Record -->
+ <class name="Record" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Record"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="serialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record with a tag (usually the field name)
+ @param rout Record output destination
+ @param tag record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record with a tag (usually field name)
+ @param rin Record input source
+ @param tag Record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record without a tag
+ @param rout Record output destination]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record without a tag
+ @param rin Record input source]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="din" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Abstract class that is extended by generated classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Record -->
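+  <!-- A sketch of the two serialize forms, assuming a hypothetical MyRecord
+       class produced by the record compiler (IOException handling omitted):
+
+       MyRecord rec = new MyRecord();
+       CsvRecordOutput rout = new CsvRecordOutput(System.out);
+       rec.serialize(rout, "rec");  // tagged form; tag is usually the field name
+       rec.serialize(rout);         // untagged form
+
+       Because Record implements WritableComparable, generated records can
+       also be used directly as Hadoop keys and values via write/readFields.
+  -->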
+ <!-- start class org.apache.hadoop.record.RecordComparator -->
+ <class name="RecordComparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordComparator" type="java.lang.Class"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a raw {@link Record} comparison implementation.]]>
+ </doc>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.record.RecordComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link Record} implementation.
+
+ @param c record class for which a raw comparator is provided
+ @param comparator Raw comparator instance for class c]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A raw record comparator base class]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.RecordComparator -->
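+  <!-- A registration sketch, assuming a hypothetical generated record
+       MyRecord; define() would typically be called from a static initializer
+       in MyRecord itself:
+
+       public class MyRecordComparator extends RecordComparator {
+         public MyRecordComparator() { super(MyRecord.class); }
+         public int compare(byte[] b1, int s1, int l1,
+                            byte[] b2, int s2, int l2) {
+           // Placeholder ordering; a real comparator decodes the fields.
+           return Utils.compareBytes(b1, s1, l1, b2, s2, l2);
+         }
+       }
+
+       RecordComparator.define(MyRecord.class, new MyRecordComparator());
+  -->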
+ <!-- start interface org.apache.hadoop.record.RecordInput -->
+ <interface name="RecordInput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a byte from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a boolean from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a long integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a single-precision float from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a double-precision number from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read byte array from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of elements.]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of map entries.]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the deserializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordInput -->
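+  <!-- A sketch of a generated deserialize body against this interface; the
+       fields count and name are illustrative assumptions:
+
+       public void deserialize(RecordInput rin, String tag) throws IOException {
+         rin.startRecord(tag);
+         count = rin.readInt("count");
+         name = rin.readString("name");
+         rin.endRecord(tag);
+       }
+  -->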
+ <!-- start interface org.apache.hadoop.record.RecordOutput -->
+ <interface name="RecordOutput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a byte to serialized record.
+ @param b Byte to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a boolean to serialized record.
+ @param b Boolean to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write an integer to serialized record.
+ @param i Integer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a long integer to serialized record.
+ @param l Long to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a single-precision float to serialized record.
+ @param f Float to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a double precision floating point number to serialized record.
+ @param d Double to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a unicode string to serialized record.
+ @param s String to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a buffer to serialized record.
+ @param buf Buffer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a record to be serialized.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized record.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a vector to be serialized.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized vector.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a map to be serialized.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized map.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the serializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordOutput -->
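+  <!-- A sketch of a generated serialize body showing the collection calls;
+       the ArrayList field names is an illustrative assumption:
+
+       public void serialize(RecordOutput rout, String tag) throws IOException {
+         rout.startRecord(this, tag);
+         rout.startVector(names, "names");
+         for (int i = 0; i < names.size(); i++) {
+           rout.writeString((String) names.get(i), "names");
+         }
+         rout.endVector(names, "names");
+         rout.endRecord(this, tag);
+       }
+  -->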
+ <!-- start class org.apache.hadoop.record.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array containing the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+ <![CDATA[Get the encoded length of an integer stored in the variable-length format
+ @return the encoded length]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used, holding the actual value.
+ For other values of i, the first byte indicates whether the
+ long is positive or negative, and how many bytes follow.
+ If the first byte value v is between -113 and -120, the following long
+ is positive and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following long
+ is negative and the number of bytes that follow is -(v+120). Bytes are
+ stored in high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an int to a binary stream with zero-compressed encoding.
+
+ @param stream Binary output stream
+ @param i int to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <field name="hexchars" type="char[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Various utility functions for the Hadoop record I/O runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Utils -->
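+  <!-- A round-trip sketch of the zero-compressed encoding described above.
+       300 does not fit in the one-byte range, so it takes a marker byte plus
+       two value bytes (IOException handling omitted):
+
+       java.io.ByteArrayOutputStream bos = new java.io.ByteArrayOutputStream();
+       java.io.DataOutputStream dos = new java.io.DataOutputStream(bos);
+       Utils.writeVLong(dos, 300L);
+       byte[] raw = bos.toByteArray();       // 3 bytes
+       long back = Utils.readVLong(raw, 0);  // 300
+       int size = Utils.getVIntSize(300L);   // 3, matching raw.length
+  -->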
+ <!-- start class org.apache.hadoop.record.XmlRecordInput -->
+ <class name="XmlRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="XmlRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Deserializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordInput -->
+ <!-- start class org.apache.hadoop.record.XmlRecordOutput -->
+ <class name="XmlRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="XmlRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Serializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordOutput -->
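+  <!-- The same write calls as the CSV sketch, routed through the XML
+       serializer; XML is a tagged format, so here the tags are actually
+       consumed by the output:
+
+       XmlRecordOutput xout = new XmlRecordOutput(System.out);
+       xout.writeInt(42, "count");
+       xout.writeString("hello", "greeting");
+  -->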
+</package>
+<package name="org.apache.hadoop.record.compiler">
+ <!-- start class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <class name="CodeBuffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A wrapper around StringBuffer that automatically does indentation]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.Consts -->
+ <class name="Consts" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="RIO_PREFIX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_VAR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER_FIELDS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_OUTPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_INPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TAG" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Constant definitions for the Record I/O compiler]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.Consts -->
+ <!-- start class org.apache.hadoop.record.compiler.JBoolean -->
+ <class name="JBoolean" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBoolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBoolean]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBoolean -->
+ <!-- start class org.apache.hadoop.record.compiler.JBuffer -->
+ <class name="JBuffer" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBuffer]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "buffer" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.JByte -->
+ <class name="JByte" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JByte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "byte" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JByte -->
+ <!-- start class org.apache.hadoop.record.compiler.JDouble -->
+ <class name="JDouble" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JDouble"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JDouble]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JDouble -->
+ <!-- start class org.apache.hadoop.record.compiler.JField -->
+ <class name="JField" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JField" type="java.lang.String, T"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JField]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[A thin wrapper around a record field.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JField -->
+ <!-- start class org.apache.hadoop.record.compiler.JFile -->
+ <class name="JFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFile" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JFile&gt;, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFile
+
+ @param name possibly full pathname to the file
+ @param inclFiles included files (as JFile)
+ @param recList List of records defined within this file]]>
+ </doc>
+ </constructor>
+ <method name="genCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <param name="destDir" type="java.lang.String"/>
+ <param name="options" type="java.util.ArrayList&lt;java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate record code in the given language. The language name
+ should be all lowercase.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Container for the Hadoop Record DDL.
+ The main components of the file are filename, list of included files,
+ and records defined in that file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFile -->
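+  <!-- A generation sketch; jfile is assumed to come from parsing a .jr DDL
+       file (parser not shown), and the int return is assumed to be a status
+       code:
+
+       java.util.ArrayList<String> options = new java.util.ArrayList<String>();
+       int status = jfile.genCode("java", "gensrc", options);
+  -->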
+ <!-- start class org.apache.hadoop.record.compiler.JFloat -->
+ <class name="JFloat" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFloat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFloat]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFloat -->
+ <!-- start class org.apache.hadoop.record.compiler.JInt -->
+ <class name="JInt" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JInt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JInt]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "int" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JInt -->
+ <!-- start class org.apache.hadoop.record.compiler.JLong -->
+ <class name="JLong" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JLong"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JLong]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "long" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JLong -->
+ <!-- start class org.apache.hadoop.record.compiler.JMap -->
+ <class name="JMap" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JMap" type="org.apache.hadoop.record.compiler.JType, org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JMap]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JMap -->
+ <!-- start class org.apache.hadoop.record.compiler.JRecord -->
+ <class name="JRecord" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JRecord" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JRecord]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JRecord -->
+ <!-- start class org.apache.hadoop.record.compiler.JString -->
+ <class name="JString" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JString"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JString]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JString -->
+ <!-- start class org.apache.hadoop.record.compiler.JType -->
+ <class name="JType" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Abstract Base class for all types supported by Hadoop Record I/O.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JType -->
+ <!-- start class org.apache.hadoop.record.compiler.JVector -->
+ <class name="JVector" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JVector" type="org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JVector]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JVector -->
+</package>
+<package name="org.apache.hadoop.record.compiler.ant">
+ <!-- start class org.apache.hadoop.record.compiler.ant.RccTask -->
+ <class name="RccTask" extends="org.apache.tools.ant.Task"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RccTask"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of RccTask]]>
+ </doc>
+ </constructor>
+ <method name="setLanguage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the output language option
+ @param language "java"/"c++"]]>
+ </doc>
+ </method>
+ <method name="setFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets the record definition file attribute
+ @param file record definition file]]>
+ </doc>
+ </method>
+ <method name="setFailonerror"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="flag" type="boolean"/>
+ <doc>
+ <![CDATA[Sets the error handling behavior when multiple files are given (via a fileset)
+ @param flag if true, throw a build exception on failure (the default)]]>
+ </doc>
+ </method>
+ <method name="setDestdir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets directory where output files will be generated
+ @param dir output directory]]>
+ </doc>
+ </method>
+ <method name="addFileset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="set" type="org.apache.tools.ant.types.FileSet"/>
+ <doc>
+ <![CDATA[Adds a fileset that can consist of one or more files
+ @param set Set of record definition files]]>
+ </doc>
+ </method>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="BuildException" type="org.apache.tools.ant.BuildException"/>
+ <doc>
+ <![CDATA[Invoke the Hadoop record compiler on each record definition file]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Hadoop record compiler Ant task.
+<p> This task takes the given record definition files and compiles them into
+ Java or C++ files. It is then up to the user to compile the generated files.
+
+ <p> The task requires the <code>file</code> or the nested fileset element to be
+ specified. Optional attributes are <code>language</code> (set the output
+ language, default is "java"),
+ <code>destdir</code> (name of the destination directory for generated java/c++
+ code, default is ".") and <code>failonerror</code> (specifies error handling
+ behavior. default is true).
+ <p><h4>Usage</h4>
+ <pre>
+ &lt;recordcc
+ destdir="${basedir}/gensrc"
+ language="java"&gt;
+ &lt;fileset include="**\/*.jr" /&gt;
+ &lt;/recordcc&gt;
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.ant.RccTask -->
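+  <!-- Before the recordcc usage above will run, the task has to be bound to
+       Ant; a typical (assumed) definition:
+
+       <taskdef name="recordcc"
+                classname="org.apache.hadoop.record.compiler.ant.RccTask"/>
+  -->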
+</package>
+<package name="org.apache.hadoop.record.compiler.generated">
+ <!-- start class org.apache.hadoop.record.compiler.generated.ParseException -->
+ <class name="ParseException" extends="java.lang.Exception"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ParseException" type="org.apache.hadoop.record.compiler.generated.Token, int[][], java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constructor is used by the method "generateParseException"
+ in the generated parser. Calling this constructor generates
+ a new object of this type with the fields "currentToken",
+ "expectedTokenSequences", and "tokenImage" set. The boolean
+ flag "specialConstructor" is also set to true to indicate that
+ this constructor was used to create this object.
+ This constructor calls its super class with the empty string
+ to force the "toString" method of parent class "Throwable" to
+ print the error message in the form:
+ ParseException: <result of getMessage>]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The following constructors are for use by you for whatever
+ purpose you can think of. Constructing the exception in this
+ manner makes the exception behave in the normal way - i.e., as
+ documented in the class "Throwable". The fields "errorToken",
+ "expectedTokenSequences", and "tokenImage" do not contain
+ relevant information. The JavaCC generated code does not use
+ these constructors.]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method has the standard behavior when this object has been
+ created using the standard constructors. Otherwise, it uses
+ "currentToken" and "expectedTokenSequences" to generate a parse
+ error message and returns it. If this object has been created
+ due to a parse error, and you do not catch it (it gets thrown
+ from the parser), then this method is called during the printing
+ of the final stack trace, and hence the correct error message
+ gets displayed.]]>
+ </doc>
+ </method>
+ <method name="add_escapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Used to convert raw characters to their escaped versions
+ when these raw versions cannot be used as part of an ASCII
+ string literal.]]>
+ </doc>
+ </method>
+ <field name="specialConstructor" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This variable determines which constructor was used to create
+ this object and thereby affects the semantics of the
+ "getMessage" method (see below).]]>
+ </doc>
+ </field>
+ <field name="currentToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is the last token that has been consumed successfully. If
+ this object has been created due to a parse error, the token
+ following this token will (therefore) be the first error token.]]>
+ </doc>
+ </field>
+ <field name="expectedTokenSequences" type="int[][]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Each entry in this array is an array of integers. Each array
+ of integers represents a sequence of tokens (by their ordinal
+ values) that is expected at this point of the parse.]]>
+ </doc>
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is a reference to the "tokenImage" array of the generated
+ parser within which the parse error occurred. This array is
+ defined in the generated ...Constants interface.]]>
+ </doc>
+ </field>
+ <field name="eol" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The end of line string for this machine.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This exception is thrown when parse errors are encountered.
+ You can explicitly create objects of this exception type by
+ calling the method generateParseException in the generated
+ parser.
+
+ You can modify this class to customize your error reporting
+ mechanisms so long as you retain the public fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.ParseException -->
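+ <!-- Editor's note: a hedged sketch of the error-handling contract documented
+      above: for parser-generated instances, getMessage() assembles its report
+      from currentToken and expectedTokenSequences. The Rcc wiring and the
+      "example.jr" input file are illustrative assumptions.
+
+      import java.io.FileReader;
+      import org.apache.hadoop.record.compiler.generated.ParseException;
+      import org.apache.hadoop.record.compiler.generated.Rcc;
+
+      public class ParseDemo {
+        public static void main(String[] args) throws Exception {
+          Rcc parser = new Rcc(new FileReader("example.jr")); // hypothetical input file
+          try {
+            parser.Input();                                   // top-level production
+          } catch (ParseException e) {
+            // Assembled from currentToken and expectedTokenSequences, as documented.
+            System.err.println(e.getMessage());
+            System.err.println("last consumed token: " + e.currentToken);
+          }
+        }
+      }
+ -->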
+ <!-- start class org.apache.hadoop.record.compiler.generated.Rcc -->
+ <class name="Rcc" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="Rcc" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="usage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="driver" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="Input" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Include" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Module" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ModuleName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="RecordList" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Record" return="org.apache.hadoop.record.compiler.JRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Field" return="org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Type" return="org.apache.hadoop.record.compiler.JType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Map" return="org.apache.hadoop.record.compiler.JMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Vector" return="org.apache.hadoop.record.compiler.JVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tm" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"/>
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <method name="generateParseException" return="org.apache.hadoop.record.compiler.generated.ParseException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="enable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="disable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="token_source" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="token" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jj_nt" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Rcc -->
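+ <!-- Editor's note: a minimal sketch of invoking the record compiler through the
+      static driver() entry point listed above. Passing only a record definition
+      file and relying on the defaults noted in the RccTask documentation
+      (language "java", destination ".") is an assumption about the command-line
+      syntax, as is the meaning of a nonzero return status.
+
+      import org.apache.hadoop.record.compiler.generated.Rcc;
+
+      public class RunRcc {
+        public static void main(String[] args) {
+          // driver() returns an int status, which makes it easier to embed than
+          // main() (assumption: nonzero indicates failure).
+          int status = Rcc.driver(new String[] { "example.jr" }); // hypothetical .jr file
+          if (status != 0) {
+            System.err.println("record compilation failed with status " + status);
+          }
+        }
+      }
+ -->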
+ <!-- start interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <interface name="RccConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="EOF" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MODULE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCLUDE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BOOLEAN_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SEMICOLON_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CSTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IDENT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinOneLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinMultiLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
+ <class name="RccTokenManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setDebugStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ds" type="java.io.PrintStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="SwitchTo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="jjFillToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="debugStream" type="java.io.PrintStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjstrLiteralImages" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="lexStateNames" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjnewLexState" type="int[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="input_stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="curChar" type="char"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
+ <class name="SimpleCharStream" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setTabSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="getTabSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="ExpandBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="wrapAround" type="boolean"/>
+ </method>
+ <method name="FillBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="BeginToken" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="UpdateLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="c" type="char"/>
+ </method>
+ <method name="readChar" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getEndColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getEndLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="backup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="amount" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="GetImage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="GetSuffix" return="char[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="int"/>
+ </method>
+ <method name="Done"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="adjustBeginLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newLine" type="int"/>
+ <param name="newCol" type="int"/>
+ <doc>
+ <![CDATA[Method to adjust line and column numbers for the start of a token.]]>
+ </doc>
+ </method>
+ <field name="staticFlag" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufpos" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufline" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufcolumn" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="column" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="line" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsCR" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsLF" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputStream" type="java.io.Reader"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="buffer" type="char[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="maxNextCharInd" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inBuf" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="tabSize" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of interface CharStream, where the stream is assumed to
+ contain only ASCII characters (without Unicode processing).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
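+ <!-- Editor's note: a hedged sketch combining SimpleCharStream with
+      RccTokenManager to scan a record definition without running the full
+      parser. The stream is documented above as ASCII-only; the DDL string is an
+      illustrative assumption about the record definition syntax.
+
+      import java.io.StringReader;
+      import org.apache.hadoop.record.compiler.generated.RccConstants;
+      import org.apache.hadoop.record.compiler.generated.RccTokenManager;
+      import org.apache.hadoop.record.compiler.generated.SimpleCharStream;
+      import org.apache.hadoop.record.compiler.generated.Token;
+
+      public class TokenDump {
+        public static void main(String[] args) {
+          String ddl = "module demo { class Pair { int first; ustring second; } }";
+          SimpleCharStream stream = new SimpleCharStream(new StringReader(ddl));
+          RccTokenManager tm = new RccTokenManager(stream);
+          // Pull tokens until the token manager reports end of input.
+          for (Token t = tm.getNextToken(); t.kind != RccConstants.EOF; t = tm.getNextToken()) {
+            System.out.println(t.kind + "\t" + t.image);  // ordinal kind and raw image
+          }
+        }
+      }
+ -->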
+ <!-- start class org.apache.hadoop.record.compiler.generated.Token -->
+ <class name="Token" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Token"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the image.]]>
+ </doc>
+ </method>
+ <method name="newToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="ofKind" type="int"/>
+ <doc>
+ <![CDATA[Returns a new Token object, by default. However, if you want, you
+ can create and return subclass objects based on the value of ofKind.
+ Simply add the cases to the switch for all those special cases.
+ For example, if you have a subclass of Token called IDToken that
+ you want to create if ofKind is ID, simply add something like:
+
+ case MyParserConstants.ID : return new IDToken();
+
+ to the following switch statement. Then you can cast the matchedToken
+ variable to the appropriate type and use it in your lexical actions.]]>
+ </doc>
+ </method>
+ <field name="kind" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[An integer that describes the kind of this token. This numbering
+ system is determined by JavaCCParser, and a table of these numbers is
+ stored in the file ...Constants.java.]]>
+ </doc>
+ </field>
+ <field name="beginLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="beginColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="image" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The string image of the token.]]>
+ </doc>
+ </field>
+ <field name="next" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A reference to the next regular (non-special) token from the input
+ stream. If this is the last token from the input stream, or if the
+ token manager has not read tokens beyond this one, this field is
+ set to null. This is true only if this token is also a regular
+ token. Otherwise, see below for a description of the contents of
+ this field.]]>
+ </doc>
+ </field>
+ <field name="specialToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This field is used to access special tokens that occur prior to this
+ token, but after the immediately preceding regular (non-special) token.
+ If there are no such special tokens, this field is set to null.
+ When there are more than one such special token, this field refers
+ to the last of these special tokens, which in turn refers to the next
+ previous special token through its specialToken field, and so on
+ until the first special token (whose specialToken field is null).
+ The next fields of special tokens refer to other special tokens that
+ immediately follow it (without an intervening regular token). If there
+ is no such token, this field is null.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Describes the input token stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Token -->
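+ <!-- Editor's note: the newToken() documentation above describes extending the
+      factory switch to return Token subclasses for particular token kinds. A
+      minimal sketch of that pattern; IDToken and MyParserConstants.ID mirror the
+      doc's own hypothetical example and are not part of this API.
+
+      import org.apache.hadoop.record.compiler.generated.Token;
+
+      public class IDToken extends Token {
+        // Extra per-token state a lexical action might want (assumption).
+        public String normalizedName;
+
+        // In the generated Token.newToken(int ofKind) one would add:
+        //   case MyParserConstants.ID : return new IDToken();
+        // so that matchedToken can be cast to IDToken inside lexical actions.
+      }
+ -->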
+ <!-- start class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
+ <class name="TokenMgrError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TokenMgrError"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="boolean, int, int, int, java.lang.String, char, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addEscapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Replaces unprintable characters by their escaped (or Unicode-escaped)
+ equivalents in the given string]]>
+ </doc>
+ </method>
+ <method name="LexicalError" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="EOFSeen" type="boolean"/>
+ <param name="lexState" type="int"/>
+ <param name="errorLine" type="int"/>
+ <param name="errorColumn" type="int"/>
+ <param name="errorAfter" type="java.lang.String"/>
+ <param name="curChar" type="char"/>
+ <doc>
+ <![CDATA[Returns a detailed message for the Error when it is thrown by the
+ token manager to indicate a lexical error.
+ Parameters:
+ EOFSeen : indicates if EOF caused the lexical error
+ lexState : the lexical state in which this error occurred
+ errorLine : the line number where the error occurred
+ errorColumn : the column number where the error occurred
+ errorAfter : the prefix that was seen before this error occurred
+ curChar : the offending character
+ Note: You can customize the lexical error message by modifying this method.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[You can also modify the body of this method to customize your error messages.
+ For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not
+ of end-user concern, so you can return something like:
+
+ "Internal Error : Please file a bug report .... "
+
+ from this method for such cases in the release version of your parser.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
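+ <!-- Editor's note: TokenMgrError extends java.lang.Error, so it bypasses the
+      checked ParseException path. A hedged sketch of catching both failure modes
+      when scanning untrusted input; the Rcc wiring and the deliberately
+      malformed input are illustrative assumptions.
+
+      import java.io.StringReader;
+      import org.apache.hadoop.record.compiler.generated.ParseException;
+      import org.apache.hadoop.record.compiler.generated.Rcc;
+      import org.apache.hadoop.record.compiler.generated.TokenMgrError;
+
+      public class SafeParse {
+        public static void main(String[] args) {
+          Rcc parser = new Rcc(new StringReader("module broken {")); // malformed on purpose
+          try {
+            parser.Input();
+          } catch (ParseException e) {       // grammar-level failure
+            System.err.println("parse error: " + e.getMessage());
+          } catch (TokenMgrError e) {        // lexical-level failure (an Error, not an Exception)
+            System.err.println("lexical error: " + e.getMessage());
+          }
+        }
+      }
+ -->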
+</package>
+<package name="org.apache.hadoop.record.meta">
+ <!-- start class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <class name="FieldTypeInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's TypeID object]]>
+ </doc>
+ </method>
+ <method name="getFieldID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's id (name)]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two FieldTypeInfos are equal if each of their fields matches]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ti" type="org.apache.hadoop.record.meta.FieldTypeInfo"/>
+ </method>
+ <doc>
+ <![CDATA[Represents type information for a field, which is made up of its
+ ID (name) and its type (a TypeID object).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <!-- start class org.apache.hadoop.record.meta.MapTypeID -->
+ <class name="MapTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapTypeID" type="org.apache.hadoop.record.meta.TypeID, org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getKeyTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's key element]]>
+ </doc>
+ </method>
+ <method name="getValueTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's value element]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two map typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a Map]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.MapTypeID -->
+ <!-- start class org.apache.hadoop.record.meta.RecordTypeInfo -->
+ <class name="RecordTypeInfo" extends="org.apache.hadoop.record.Record"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty RecordTypeInfo object.]]>
+ </doc>
+ </constructor>
+ <constructor name="RecordTypeInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a RecordTypeInfo object representing a record with the given name
+ @param name Name of the record]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the name of the record]]>
+ </doc>
+ </method>
+ <method name="setName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[set the name of the record]]>
+ </doc>
+ </method>
+ <method name="addField"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fieldName" type="java.lang.String"/>
+ <param name="tid" type="org.apache.hadoop.record.meta.TypeID"/>
+ <doc>
+ <![CDATA[Add a field.
+ @param fieldName Name of the field
+ @param tid Type ID of the field]]>
+ </doc>
+ </method>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a collection of field type infos]]>
+ </doc>
+ </method>
+ <method name="getNestedStructTypeInfo" return="org.apache.hadoop.record.meta.RecordTypeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the type info of a nested record. We only consider nesting
+ to one level.
+ @param name Name of the nested record]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer_" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ <doc>
+ <![CDATA[This class doesn't implement Comparable as it's not meant to be used
+ for anything besides de/serializing, so comparison is not really supported:
+ the method throws a ClassCastException if the argument is not a
+ RecordTypeInfo, and otherwise always returns 0.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A record's Type Information object which can read/write itself.
+
+ Type information for a record comprises metadata about the record,
+ as well as a collection of type information for each field in the record.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.RecordTypeInfo -->
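+ <!-- Editor's note: a hedged sketch of assembling type information with the
+      methods documented above (addField, serialize). The CsvRecordOutput sink
+      from org.apache.hadoop.record is an assumption about the available
+      RecordOutput implementations, as are the record and field names.
+
+      import org.apache.hadoop.record.CsvRecordOutput;
+      import org.apache.hadoop.record.meta.RecordTypeInfo;
+      import org.apache.hadoop.record.meta.TypeID;
+
+      public class DescribeRecord {
+        public static void main(String[] args) throws Exception {
+          RecordTypeInfo info = new RecordTypeInfo("Pair");  // record named "Pair"
+          info.addField("first", TypeID.IntTypeID);          // int field
+          info.addField("second", TypeID.StringTypeID);      // ustring field
+          // Write the metadata so a reader can later deserialize() and compare.
+          info.serialize(new CsvRecordOutput(System.out), "Pair");
+        }
+      }
+ -->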
+ <!-- start class org.apache.hadoop.record.meta.StructTypeID -->
+ <class name="StructTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StructTypeID" type="org.apache.hadoop.record.meta.RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a StructTypeID based on the RecordTypeInfo of some record]]>
+ </doc>
+ </constructor>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a struct]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.StructTypeID -->
+ <!-- start class org.apache.hadoop.record.meta.TypeID -->
+ <class name="TypeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeVal" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type value. One of the constants in RIOType.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two base typeIDs are equal if they refer to the same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <field name="BoolTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constant instances of the basic types, so we can share them.]]>
+ </doc>
+ </field>
+ <field name="BufferTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ByteTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DoubleTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FloatTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IntTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LongTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="StringTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="typeVal" type="byte"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Represents typeID for basic types.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID -->
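+ <!-- Editor's note: a small sketch of composing the shared basic-type constants
+      above into container TypeIDs, matching the documented equality contract
+      (typeIDs compare by structure, not by object identity). The variable names
+      are illustrative.
+
+      import org.apache.hadoop.record.meta.MapTypeID;
+      import org.apache.hadoop.record.meta.TypeID;
+      import org.apache.hadoop.record.meta.VectorTypeID;
+
+      public class TypeIdDemo {
+        public static void main(String[] args) {
+          TypeID mapOfStringToLong = new MapTypeID(TypeID.StringTypeID, TypeID.LongTypeID);
+          TypeID vectorOfLong = new VectorTypeID(TypeID.LongTypeID);
+          // Equal structure implies equal typeIDs, per the equals() docs above.
+          System.out.println(mapOfStringToLong.equals(
+              new MapTypeID(TypeID.StringTypeID, TypeID.LongTypeID)));  // true
+          System.out.println(vectorOfLong.equals(TypeID.LongTypeID));   // false
+        }
+      }
+ -->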
+ <!-- start class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <class name="TypeID.RIOType" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TypeID.RIOType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <field name="BOOL" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRUCT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[constants representing the IDL types we support]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <!-- start class org.apache.hadoop.record.meta.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <param name="typeID" type="org.apache.hadoop.record.meta.TypeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[read/skip bytes from stream based on a type]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Various utility functions for the Hadoop record I/O platform.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.Utils -->
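+ <!-- Editor's note: a hedged sketch of the skip() helper documented above, which
+      consumes a serialized value of a given type without materializing it. The
+      BinaryRecordInput source (and its static get() accessor) from
+      org.apache.hadoop.record is an assumption, as is the "first" tag.
+
+      import java.io.DataInput;
+      import java.io.IOException;
+      import org.apache.hadoop.record.BinaryRecordInput;
+      import org.apache.hadoop.record.meta.TypeID;
+      import org.apache.hadoop.record.meta.Utils;
+
+      public class SkipField {
+        // Skip one serialized int field (e.g. a field unknown to this reader).
+        static void skipInt(DataInput in) throws IOException {
+          Utils.skip(BinaryRecordInput.get(in), "first", TypeID.IntTypeID);
+        }
+      }
+ -->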
+ <!-- start class org.apache.hadoop.record.meta.VectorTypeID -->
+ <class name="VectorTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VectorTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getElementTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two vector typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for vector.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.VectorTypeID -->
+</package>
+<package name="org.apache.hadoop.security">
+ <!-- start class org.apache.hadoop.security.UnixUserGroupInformation -->
+ <class name="UnixUserGroupInformation" extends="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnixUserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameters user name and its group names.
+ The first entry in the groups list is the default group.
+
+ @param userName a user's name
+ @param groupNames groups list, first of which is the default group
+ @exception IllegalArgumentException if any argument is null]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameter user/group names
+
+ @param ugi an array containing user/group names, the first
+ element of which is the user name, the second of
+ which is the default group name.
+ @exception IllegalArgumentException if the array size is less than 2
+ or any element is null.]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Create an immutable {@link UnixUserGroupInformation} object.]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an array of group names]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the user's name]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Deserialize this object.
+ First check whether the input is a UGI in the string format;
+ if not, throw an IOException. Otherwise set this object's fields
+ by reading them from the given data input.
+
+ @param in input stream
+ @exception IOException if any error is encountered while reading]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Serialize this object.
+ First write a string marking that this is a UGI in the string format,
+ then write this object's serialized form to the given data output.
+
+ @param out output stream
+ @exception IOException if any error is encountered during writing]]>
+ </doc>
+ </method>
+ <method name="saveToConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <param name="ugi" type="org.apache.hadoop.security.UnixUserGroupInformation"/>
+ <doc>
+      <![CDATA[Store the given <code>ugi</code> as a comma-separated string in
+ <code>conf</code> under the property <code>attr</code>.
+
+ The string starts with the user name, followed by the default group name
+ and the other group names.
+
+ @param conf configuration
+ @param attr property name
+ @param ugi a UnixUserGroupInformation]]>
+ </doc>
+ </method>
+ <method name="readFromConf" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+      <![CDATA[Read a UGI from the given <code>conf</code>.
+
+ The object is expected to be stored under the property name <code>attr</code>
+ as a comma-separated string that starts
+ with the user name followed by group names.
+ If the property is not defined, return null.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the UGI map, return the UGI in the map.
+ Otherwise, construct a UGI from the configuration, store it in the
+ UGI map, and return it.
+
+ @param conf configuration
+ @param attr property name
+ @return a UnixUGI
+ @throws LoginException if the stored string is ill-formatted.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+      <![CDATA[Get the current user's name and the names of all of its groups from Unix.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the UGI map, return the UGI in the map.
+ Otherwise get the current user's information from Unix, store it
+ in the map, and return it.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Equivalent to login(conf, false).]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="save" type="boolean"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+      <![CDATA[Get a user's name and group names from the given configuration;
+ if they are not defined in the configuration, get the current user's
+ information from Unix.
+ If the user already has a UGI in the UGI map, return the one in
+ the map.
+
+ @param conf either a job configuration or a client's configuration
+ @param save whether to save the UGI to <code>conf</code>
+ @return UnixUserGroupInformation a user/group information
+ @exception LoginException if not able to get the user/group information]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Decide whether two UGIs are the same
+
+ @param other other object
+ @return true if they are the same; false otherwise.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code for this UGI.
+ The hash code for a UGI is the hash code of its user name string.
+
+ @return a hash code value for this UGI.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this object to a string
+
+ @return a comma separated string containing the user name and group names]]>
+ </doc>
+ </method>
+ <field name="UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of UserGroupInformation in the Unix system]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UnixUserGroupInformation -->
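+  <!-- A minimal usage sketch for UnixUserGroupInformation (illustrative only;
+       assumes a Hadoop 0.20 classpath):
+
+       import javax.security.auth.login.LoginException;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.security.UnixUserGroupInformation;
+
+       public class UgiExample {
+         public static void main(String[] args) throws LoginException {
+           Configuration conf = new Configuration();
+           // Log in as the current Unix user; save=true caches the UGI in conf.
+           UnixUserGroupInformation ugi = UnixUserGroupInformation.login(conf, true);
+           System.out.println(ugi.getUserName());
+           // The cached UGI can later be read back from the configuration.
+           UnixUserGroupInformation same = UnixUserGroupInformation.readFromConf(
+               conf, UnixUserGroupInformation.UGI_PROPERTY_NAME);
+           System.out.println(same.equals(ugi)); // true
+         }
+       }
+  -->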
+ <!-- start class org.apache.hadoop.security.UserGroupInformation -->
+ <class name="UserGroupInformation" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="UserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCurrentUGI" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="setCurrentUGI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <doc>
+ <![CDATA[Set the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get username
+
+ @return the user's name]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the names of the groups that the user belongs to
+
+ @return an array of group names]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Login and return a UserGroupInformation object.]]>
+ </doc>
+ </method>
+ <method name="readFrom" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link UserGroupInformation} from conf]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Writable} abstract class for storing user and groups information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UserGroupInformation -->
+</package>
+<package name="org.apache.hadoop.tools">
+ <!-- start class org.apache.hadoop.tools.DistCp -->
+ <class name="DistCp" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="DistCp" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="destPath" type="java.lang.String"/>
+ <param name="logPath" type="org.apache.hadoop.fs.Path"/>
+ <param name="srcAsList" type="boolean"/>
+ <param name="ignoreReadFailures" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+      <![CDATA[This is the main driver for recursively copying directories
+ across file systems. It takes at least two command-line parameters: a source
+ URL and a destination URL. It then essentially does an "ls -lR" on the
+ source URL, and writes the output in a round-robin manner to all the map
+ input files. The mapper actually copies the files allotted to it; the
+ reduce phase is empty.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getRandomId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A Map-reduce program to recursively copy directories between
+ different file-systems.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp -->
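+  <!-- A sketch of driving DistCp programmatically via ToolRunner (the hdfs URIs
+       are placeholders):
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.tools.DistCp;
+       import org.apache.hadoop.util.ToolRunner;
+
+       public class DistCpExample {
+         public static void main(String[] args) throws Exception {
+           Configuration conf = new Configuration();
+           // Equivalent to: bin/hadoop distcp hdfs://nn1:8020/src hdfs://nn2:8020/dst
+           int exitCode = ToolRunner.run(conf, new DistCp(conf),
+               new String[] { "hdfs://nn1:8020/src", "hdfs://nn2:8020/dst" });
+           System.exit(exitCode);
+         }
+       }
+  -->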
+ <!-- start class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <class name="DistCp.DuplicationException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="ERROR_CODE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Error code for this exception]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An exception class for duplicated source files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <!-- start class org.apache.hadoop.tools.HadoopArchives -->
+ <class name="HadoopArchives" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="HadoopArchives" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="archive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPaths" type="java.util.List&lt;org.apache.hadoop.fs.Path&gt;"/>
+ <param name="archiveName" type="java.lang.String"/>
+ <param name="dest" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Archive the given source paths into
+ the destination.
+ @param srcPaths the source paths to be archived
+ @param archiveName the name of the archive to create
+ @param dest the destination dir that will contain the archive]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+      <![CDATA[The main driver for creating the archives.
+ It takes at least two command line parameters: the src and the
+ dest. It does an lsr on the source paths.
+ The mappers create the archives and the reducer creates
+ the archive index.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+      <![CDATA[The main function.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[An archive creation utility.
+ This class provides methods that can be used
+ to create Hadoop archives. For an understanding of
+ Hadoop archives, look at {@link HarFileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.HadoopArchives -->
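+  <!-- A sketch of creating an archive with the class above (paths and the archive
+       name are illustrative):
+
+       import java.util.Arrays;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.tools.HadoopArchives;
+
+       public class HarExample {
+         public static void main(String[] args) throws Exception {
+           HadoopArchives har = new HadoopArchives(new Configuration());
+           // Runs a MapReduce job that packs /user/logs into /user/archives/logs.har.
+           har.archive(Arrays.asList(new Path("/user/logs")),
+                       "logs.har", new Path("/user/archives"));
+         }
+       }
+  -->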
+ <!-- start class org.apache.hadoop.tools.Logalyzer -->
+ <class name="Logalyzer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Logalyzer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doArchive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logListURI" type="java.lang.String"/>
+ <param name="archiveDirectory" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[doArchive: Workhorse function to archive log files.
+ @param logListURI : The URI which serves the list of log files to archive.
+ @param archiveDirectory : The directory to store archived log files.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="doAnalyze"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFilesDirectory" type="java.lang.String"/>
+ <param name="outputDirectory" type="java.lang.String"/>
+ <param name="grepPattern" type="java.lang.String"/>
+ <param name="sortColumns" type="java.lang.String"/>
+ <param name="columnSeparator" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doAnalyze:
+ @param inputFilesDirectory : Directory containing the files to be analyzed.
+ @param outputDirectory : Directory to store analysis (output).
+ @param grepPattern : Pattern to *grep* for.
+ @param sortColumns : Sort specification for output.
+ @param columnSeparator : Column separator.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+      <![CDATA[Logalyzer: A utility tool for archiving and analyzing hadoop logs.
+ <p>
+ This tool supports archiving and analyzing (sort/grep) of log-files.
+ It takes as input
+ a) Input uri which will serve uris of the logs to be archived.
+ b) Output directory (not mandatory).
+ c) Directory on dfs to archive the logs.
+ d) The sort/grep patterns for analyzing the files and separator for boundaries.
+ Usage:
+ Logalyzer -archive -archiveDir <directory to archive logs> -analysis <directory> -logs <log-list uri> -grep <pattern> -sort <col1, col2> -separator <separator>
+ <p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <class name="Logalyzer.LogComparator" extends="org.apache.hadoop.io.Text.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Logalyzer.LogComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys of the logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+ <class name="Logalyzer.LogRegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="Logalyzer.LogRegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+</package>
+<package name="org.apache.hadoop.util">
+ <!-- start class org.apache.hadoop.util.Daemon -->
+ <class name="Daemon" extends="java.lang.Thread"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Daemon"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.ThreadGroup, java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread to be part of a specified thread group.]]>
+ </doc>
+ </constructor>
+ <method name="getRunnable" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A thread that has called {@link Thread#setDaemon(boolean) } with true.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Daemon -->
+ <!-- start class org.apache.hadoop.util.DiskChecker -->
+ <class name="DiskChecker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mkdirsWithExistsCheck" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+      <![CDATA[The semantics of the mkdirsWithExistsCheck method differ from the mkdirs
+ method provided in Sun's java.io.File class in the following way:
+ while creating the non-existent parent directories, this method checks for
+ the existence of those directories if the mkdir fails at any point (since
+ that directory might have just been created by some other process).
+ If both the mkdir() and the exists() check fail for any seemingly
+ non-existent directory, then we signal an error; Sun's mkdirs would signal
+ an error (return false) if a directory it is attempting to create already
+ exists or the mkdir fails.
+ @param dir the directory to create
+ @return true on success, false on failure]]>
+ </doc>
+ </method>
+ <method name="checkDir"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ </method>
+ <doc>
+      <![CDATA[Class that provides utility functions for checking disk problems]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker -->
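+  <!-- A sketch of DiskChecker usage (the path is illustrative):
+
+       import java.io.File;
+       import org.apache.hadoop.util.DiskChecker;
+       import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+
+       public class DiskCheckExample {
+         public static void main(String[] args) {
+           try {
+             // Throws DiskErrorException if the directory cannot be created,
+             // read, or written.
+             DiskChecker.checkDir(new File("/tmp/hadoop-local"));
+             System.out.println("disk ok");
+           } catch (DiskErrorException e) {
+             System.err.println("bad disk: " + e.getMessage());
+           }
+         }
+       }
+  -->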
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <class name="DiskChecker.DiskErrorException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskErrorException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <class name="DiskChecker.DiskOutOfSpaceException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskOutOfSpaceException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <!-- start class org.apache.hadoop.util.GenericOptionsParser -->
+ <class name="GenericOptionsParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Create a <code>GenericOptionsParser</code> to parse only the generic Hadoop
+ arguments.
+
+ The array of string arguments other than the generic arguments can be
+ obtained by {@link #getRemainingArgs()}.
+
+ @param conf the <code>Configuration</code> to modify.
+ @param args command-line arguments.]]>
+ </doc>
+ </constructor>
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, org.apache.commons.cli.Options, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a <code>GenericOptionsParser</code> to parse given options as well
+ as generic Hadoop options.
+
+ The resulting <code>CommandLine</code> object can be obtained by
+ {@link #getCommandLine()}.
+
+ @param conf the configuration to modify
+ @param options options built by the caller
+ @param args User-specified arguments]]>
+ </doc>
+ </constructor>
+ <method name="getRemainingArgs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array of Strings containing only application-specific arguments.
+
+ @return array of <code>String</code>s containing the un-parsed arguments
+ or <strong>empty array</strong> if commandLine was not defined.]]>
+ </doc>
+ </method>
+ <method name="getCommandLine" return="org.apache.commons.cli.CommandLine"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the commons-cli <code>CommandLine</code> object
+ to process the parsed arguments.
+
+ Note: If the object is created with
+ {@link #GenericOptionsParser(Configuration, String[])}, then returned
+ object will only contain parsed generic options.
+
+ @return <code>CommandLine</code> representing list of arguments
+ parsed against Options descriptor.]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Print the usage message for generic command-line options supported.
+
+ @param out stream to print the usage message to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>GenericOptionsParser</code> is a utility to parse command line
+ arguments generic to the Hadoop framework.
+
+ <code>GenericOptionsParser</code> recognizes several standard command
+ line arguments, enabling applications to easily specify a namenode, a
+ jobtracker, additional configuration resources etc.
+
+ <h4 id="GenericOptions">Generic Options</h4>
+
+ <p>The supported generic options are:</p>
+ <p><blockquote><pre>
+ -conf &lt;configuration file&gt; specify a configuration file
+ -D &lt;property=value&gt; use value for given property
+ -fs &lt;local|namenode:port&gt; specify a namenode
+ -jt &lt;local|jobtracker:port&gt; specify a job tracker
+ -files &lt;comma separated list of files&gt; specify comma separated
+ files to be copied to the map reduce cluster
+ -libjars &lt;comma separated list of jars&gt; specify comma separated
+ jar files to include in the classpath.
+ -archives &lt;comma separated list of archives&gt; specify comma
+ separated archives to be unarchived on the compute machines.
+
+ </pre></blockquote></p>
+
+ <p>The general command line syntax is:</p>
+ <p><tt><pre>
+ bin/hadoop command [genericOptions] [commandOptions]
+ </pre></tt></p>
+
+ <p>Generic command line arguments <strong>might</strong> modify
+ <code>Configuration</code> objects, given to constructors.</p>
+
+ <p>The functionality is implemented using Commons CLI.</p>
+
+ <p>Examples:</p>
+ <p><blockquote><pre>
+ $ bin/hadoop dfs -fs darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -conf hadoop-site.xml -ls /data
+ list /data directory in dfs with conf specified in hadoop-site.xml
+
+ $ bin/hadoop job -D mapred.job.tracker=darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt local -submit job.xml
+ submit a job to local runner
+
+ $ bin/hadoop jar -libjars testlib.jar
+ -archives test.tgz -files file.txt inputjar args
+ job submission with libjars, files and archives
+ </pre></blockquote></p>
+
+ @see Tool
+ @see ToolRunner]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericOptionsParser -->
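+  <!-- A sketch of separating generic Hadoop options from application options
+       (illustrative):
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.util.GenericOptionsParser;
+
+       public class ParserExample {
+         public static void main(String[] args) {
+           Configuration conf = new Configuration();
+           // Consumes e.g. "-D mapred.job.tracker=local" and updates conf in place.
+           GenericOptionsParser parser = new GenericOptionsParser(conf, args);
+           String[] appArgs = parser.getRemainingArgs();
+           System.out.println(appArgs.length + " application-specific argument(s)");
+         }
+       }
+  -->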
+ <!-- start class org.apache.hadoop.util.GenericsUtil -->
+ <class name="GenericsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericsUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClass" return="java.lang.Class&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <doc>
+ <![CDATA[Returns the Class object (of type <code>Class&lt;T&gt;</code>) of the
+ argument of type <code>T</code>.
+ @param <T> The type of the argument
+ @param t the object whose class is to be obtained
+ @return <code>Class&lt;T&gt;</code>]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+      <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param c the Class object of the items in the list
+ @param list the list to convert]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+      <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param list the list to convert
+ @throws ArrayIndexOutOfBoundsException if the list is empty.
+ Use {@link #toArray(Class, List)} if the list may be empty.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Contains utility methods for dealing with Java Generics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericsUtil -->
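+  <!-- A sketch of GenericsUtil.toArray (illustrative):
+
+       import java.util.Arrays;
+       import java.util.List;
+       import org.apache.hadoop.util.GenericsUtil;
+
+       public class GenericsExample {
+         public static void main(String[] args) {
+           List<String> names = Arrays.asList("a", "b", "c");
+           // The one-argument form throws on empty lists; pass the Class when
+           // the list may be empty.
+           String[] fromList = GenericsUtil.toArray(names);
+           String[] safe = GenericsUtil.toArray(String.class, names);
+           System.out.println(fromList.length + " " + safe.length);
+         }
+       }
+  -->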
+ <!-- start class org.apache.hadoop.util.HeapSort -->
+ <class name="HeapSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="HeapSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using heap sort.
+ {@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of HeapSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.HeapSort -->
+ <!-- start class org.apache.hadoop.util.HostsFileReader -->
+ <class name="HostsFileReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HostsFileReader" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="refresh"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExcludedHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.util.HostsFileReader -->
+ <!-- start interface org.apache.hadoop.util.IndexedSortable -->
+ <interface name="IndexedSortable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Compare items at the given addresses consistent with the semantics of
+ {@link java.util.Comparator#compare}.]]>
+ </doc>
+ </method>
+ <method name="swap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Swap items at the given addresses.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for collections capable of being sorted by {@link IndexedSorter}
+ algorithms.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSortable -->
+ <!-- start interface org.apache.hadoop.util.IndexedSorter -->
+ <interface name="IndexedSorter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the items accessed through the given IndexedSortable over the given
+ range of logical indices. From the perspective of the sort algorithm,
+ each index between l (inclusive) and r (exclusive) is an addressable
+ entry.
+ @see IndexedSortable#compare
+ @see IndexedSortable#swap]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Same as {@link #sort(IndexedSortable,int,int)}, but indicate progress
+ periodically.
+ @see #sort(IndexedSortable,int,int)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for sort algorithms accepting {@link IndexedSortable} items.
+
+ A sort algorithm implementing this interface may only
+ {@link IndexedSortable#compare} and {@link IndexedSortable#swap} items
+ for a range of indices to effect a sort across that range.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSorter -->
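+  <!-- A sketch tying IndexedSortable to an IndexedSorter (illustrative): sorting
+       a plain int[] in place by presenting it as an indexed collection.
+
+       import java.util.Arrays;
+       import org.apache.hadoop.util.IndexedSortable;
+       import org.apache.hadoop.util.QuickSort;
+
+       public class SortExample {
+         public static void main(String[] args) {
+           final int[] data = { 5, 3, 9, 1 };
+           IndexedSortable view = new IndexedSortable() {
+             public int compare(int i, int j) { return data[i] - data[j]; }
+             public void swap(int i, int j) {
+               int t = data[i]; data[i] = data[j]; data[j] = t;
+             }
+           };
+           new QuickSort().sort(view, 0, data.length);  // l inclusive, r exclusive
+           System.out.println(Arrays.toString(data));   // [1, 3, 5, 9]
+         }
+       }
+  -->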
+ <!-- start class org.apache.hadoop.util.MergeSort -->
+ <class name="MergeSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MergeSort" type="java.util.Comparator&lt;org.apache.hadoop.io.IntWritable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mergeSort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="int[]"/>
+ <param name="dest" type="int[]"/>
+ <param name="low" type="int"/>
+ <param name="high" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of MergeSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.MergeSort -->
+ <!-- start class org.apache.hadoop.util.NativeCodeLoader -->
+ <class name="NativeCodeLoader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeCodeLoader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeCodeLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if native-hadoop code is loaded for this platform.
+
+ @return <code>true</code> if native-hadoop is loaded,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getLoadNativeLibraries" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+      <![CDATA[Return whether native hadoop libraries, if present, can be used for this job.
+ @param jobConf job configuration
+
+ @return <code>true</code> if native hadoop libraries, if present, can be
+ used for this job; <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setLoadNativeLibraries"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="loadNativeLibraries" type="boolean"/>
+ <doc>
+      <![CDATA[Set whether native hadoop libraries, if present, can be used for this job.
+
+ @param jobConf job configuration
+ @param loadNativeLibraries can native hadoop libraries be loaded]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A helper to load the native hadoop code i.e. libhadoop.so.
+ This handles the fallback to either the bundled libhadoop-Linux-i386-32.so
+ or the default java implementations where appropriate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.NativeCodeLoader -->
+ <!-- start class org.apache.hadoop.util.PlatformName -->
+ <class name="PlatformName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PlatformName"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPlatformName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the complete platform name as per the java-vm.
+ @return the complete platform name as per the java-vm.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[A helper class for getting build-info of the java-vm.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PlatformName -->
+ <!-- start class org.apache.hadoop.util.PrintJarMainClass -->
+ <class name="PrintJarMainClass" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PrintJarMainClass"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A micro-application that prints the main class name out of a jar file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PrintJarMainClass -->
+ <!-- start class org.apache.hadoop.util.PriorityQueue -->
+ <class name="PriorityQueue" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PriorityQueue"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="lessThan" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Determines the ordering of objects in this priority queue. Subclasses
+ must define this one method.]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="maxSize" type="int"/>
+ <doc>
+ <![CDATA[Subclass constructors must call this.]]>
+ </doc>
+ </method>
+ <method name="put"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Adds an Object to a PriorityQueue in log(size) time.
+ If one tries to add more objects than the maxSize given to initialize,
+ a RuntimeException (ArrayIndexOutOfBoundsException) is thrown.]]>
+ </doc>
+ </method>
+ <method name="insert" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Adds element to the PriorityQueue in log(size) time if either
+ the PriorityQueue is not full, or not lessThan(element, top()).
+ @param element
+ @return true if element is added, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="top" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the least element of the PriorityQueue in constant time.]]>
+ </doc>
+ </method>
+ <method name="pop" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes and returns the least element of the PriorityQueue in log(size)
+ time.]]>
+ </doc>
+ </method>
+ <method name="adjustTop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Should be called when the Object at the top changes values. Still log(n)
+ worst case, but it's at least twice as fast to <pre>
+ { pq.top().change(); pq.adjustTop(); }
+ </pre> instead of <pre>
+ { o = pq.pop(); o.change(); pq.put(o); }
+ </pre>]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of elements currently stored in the PriorityQueue.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes all entries from the PriorityQueue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A PriorityQueue maintains a partial ordering of its elements such that the
+ least element can always be found in constant time. Put()'s and pop()'s
+ require log(size) time.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PriorityQueue -->
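+  <!-- A sketch of subclassing the abstract PriorityQueue above (illustrative):
+       only lessThan must be defined, and initialize must be called once.
+
+       import org.apache.hadoop.util.PriorityQueue;
+
+       public class IntMinQueue extends PriorityQueue {
+         public IntMinQueue(int maxSize) { initialize(maxSize); }
+         protected boolean lessThan(Object a, Object b) {
+           return (Integer) a < (Integer) b;
+         }
+         public static void main(String[] args) {
+           IntMinQueue q = new IntMinQueue(4);
+           q.put(7); q.put(2); q.put(5);
+           System.out.println(q.pop()); // 2, the least element
+         }
+       }
+  -->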
+ <!-- start class org.apache.hadoop.util.ProgramDriver -->
+ <class name="ProgramDriver" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ProgramDriver"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="mainClass" type="java.lang.Class"/>
+ <param name="description" type="java.lang.String"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+      <![CDATA[This is the method that adds the classes to the repository.
+ @param name The name under which the class instance is to be invoked
+ @param mainClass The class that you want to add to the repository
+ @param description The description of the class
+ @throws NoSuchMethodException
+ @throws SecurityException]]>
+ </doc>
+ </method>
+ <method name="driver"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[This is a driver for the example programs.
+ It looks at the first command line argument and tries to find an
+ example program with that name.
+ If it is found, it calls the main method in that class with the rest
+ of the command line arguments.
+ @param args The arguments from the user. args[0] is the command to run.
+ @throws NoSuchMethodException
+ @throws SecurityException
+ @throws IllegalAccessException
+ @throws IllegalArgumentException
+ @throws Throwable Anything thrown by the example program's main]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A driver that is used to run programs added to it]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ProgramDriver -->
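+  <!-- A sketch of a ProgramDriver-based launcher (illustrative; WordCount is a
+       hypothetical stand-in for any class with a main method):
+
+       import org.apache.hadoop.util.ProgramDriver;
+
+       public class ExampleDriver {
+         public static void main(String[] args) throws Throwable {
+           ProgramDriver driver = new ProgramDriver();
+           // WordCount is hypothetical; register any class exposing main(String[]).
+           driver.addClass("wordcount", WordCount.class, "counts words in the input");
+           // Dispatches on args[0] and forwards the remaining arguments.
+           driver.driver(args);
+         }
+       }
+  -->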
+ <!-- start class org.apache.hadoop.util.Progress -->
+ <class name="Progress" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Progress"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new root node.]]>
+ </doc>
+ </constructor>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a named node to the tree.]]>
+ </doc>
+ </method>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds a node to the tree.]]>
+ </doc>
+ </method>
+ <method name="startNextPhase"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Called during execution to move to the next phase at this level in the
+ tree.]]>
+ </doc>
+ </method>
+ <method name="phase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current sub-node executing.]]>
+ </doc>
+ </method>
+ <method name="complete"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Completes this node, moving the parent node to its next child.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progress" type="float"/>
+ <doc>
+ <![CDATA[Called during execution on a leaf node to set its progress.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the overall progress of the root.]]>
+ </doc>
+ </method>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Utility to assist with generation of progress reports. Applications build
+ a hierarchy of {@link Progress} instances, each modelling a phase of
+ execution. The root is constructed with {@link #Progress()}. Nodes for
+ sub-phases are created by calling {@link #addPhase()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Progress -->
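+  <!-- A sketch of the phase tree described above (illustrative; phase names and
+       the printed value assume two equally weighted phases):
+
+       import org.apache.hadoop.util.Progress;
+
+       public class ProgressExample {
+         public static void main(String[] args) {
+           Progress root = new Progress();
+           Progress copy = root.addPhase("copy");
+           Progress sort = root.addPhase("sort");
+           copy.set(1.0f);         // first phase finished
+           root.startNextPhase();  // advance to the "sort" phase
+           sort.set(0.5f);
+           // Two equal phases: one complete plus half of the second = 0.75.
+           System.out.println(root.get());
+         }
+       }
+  -->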
+ <!-- start interface org.apache.hadoop.util.Progressable -->
+ <interface name="Progressable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Report progress to the Hadoop framework.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for reporting progress.
+
+ <p>Clients and/or applications can use the provided <code>Progressable</code>
+ to explicitly report progress to the Hadoop framework. This is especially
+ important for operations which take an insignificant amount of time since,
+ in-lieu of the reported progress, the framework has to assume that an error
+ has occured and time-out the operation.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Progressable -->
+ <!-- start class org.apache.hadoop.util.QuickSort -->
+ <class name="QuickSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="QuickSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMaxDepth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="x" type="int"/>
+ <doc>
+ <![CDATA[Deepest recursion before giving up and doing a heapsort.
+ Returns 2 * ceil(log(n)).]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using quick sort.
+ {@inheritDoc} If the recursion depth exceeds {@link #getMaxDepth},
+ then switch to {@link HeapSort}.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of QuickSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.QuickSort -->
+ <!-- start class org.apache.hadoop.util.ReflectionUtils -->
+ <class name="ReflectionUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReflectionUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theObject" type="java.lang.Object"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check and set 'configuration' if necessary.
+
+ @param theObject object for which to set configuration
+ @param conf Configuration]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create an object for the given class and initialize it from conf
+
+ @param theClass class of which an object is created
+ @param conf Configuration
+ @return a new object]]>
+ </doc>
+ </method>
+ <method name="setContentionTracing"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="boolean"/>
+ </method>
+ <method name="printThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.PrintWriter"/>
+ <param name="title" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Print all of the threads' information and stack traces.
+ 
+ @param stream the stream to write to
+ @param title a string title for the stack trace]]>
+ </doc>
+ </method>
+ <method name="logThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="title" type="java.lang.String"/>
+ <param name="minInterval" type="long"/>
+ <doc>
+ <![CDATA[Log the current thread stacks at INFO level.
+ @param log the logger that logs the stack trace
+ @param title a descriptive title for the call stacks
+ @param minInterval the minimum time since the stacks were last logged]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="T"/>
+ <doc>
+ <![CDATA[Return the correctly-typed {@link Class} of the given object.
+
+ @param o object whose correctly-typed <code>Class</code> is to be obtained
+ @return the correctly typed <code>Class</code> of the given object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[General reflection utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ReflectionUtils -->
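+ <!-- Editorial sketch (not JDiff output): newInstance instantiates a class
+      reflectively and, if the object is Configurable, injects the given
+      Configuration. "org.example.MyWritable" is a hypothetical class name, and
+      the checked ClassNotFoundException is assumed to be handled by the caller.
+
+      Configuration conf = new Configuration();
+      Object o = ReflectionUtils.newInstance(
+          conf.getClassByName("org.example.MyWritable"), conf);
+ -->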
+ <!-- start class org.apache.hadoop.util.RunJar -->
+ <class name="RunJar" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RunJar"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="unJar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jarFile" type="java.io.File"/>
+ <param name="toDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unpack a jar file into a directory.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Run a Hadoop job jar. If the main class is not in the jar's manifest,
+ then it must be provided on the command line.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Run a Hadoop job jar.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.RunJar -->
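+ <!-- Editorial sketch (not JDiff output): RunJar.main backs the "hadoop jar"
+      shell command; unJar can also be called directly. Paths are hypothetical.
+
+      RunJar.unJar(new java.io.File("/tmp/app.jar"),
+                   new java.io.File("/tmp/app-unpacked"));
+ -->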
+ <!-- start class org.apache.hadoop.util.ServletUtil -->
+ <class name="ServletUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ServletUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initHTML" return="java.io.PrintWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="response" type="javax.servlet.ServletResponse"/>
+ <param name="title" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the initial HTML header and return a PrintWriter for the remainder of the page.]]>
+ </doc>
+ </method>
+ <method name="getParameter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.ServletRequest"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a parameter from a ServletRequest.
+ Return null if the parameter contains only white spaces.]]>
+ </doc>
+ </method>
+ <method name="htmlFooter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[HTML footer to be added to the JSPs.
+ @return the HTML footer.]]>
+ </doc>
+ </method>
+ <field name="HTML_TAIL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.util.ServletUtil -->
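+ <!-- Editorial sketch (not JDiff output): typical use inside a servlet's doGet,
+      assuming "response" is the ServletResponse passed to the servlet.
+
+      java.io.PrintWriter out = ServletUtil.initHTML(response, "Cluster Status");
+      out.print("<h2>Cluster Status</h2>");
+      out.print(ServletUtil.htmlFooter());
+ -->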
+ <!-- start class org.apache.hadoop.util.Shell -->
+ <class name="Shell" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param interval the minimum duration to wait before re-executing the
+ command.]]>
+ </doc>
+ </constructor>
+ <method name="getGROUPS_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's groups list]]>
+ </doc>
+ </method>
+ <method name="getGET_PERMISSION_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a Unix command to get permission information.]]>
+ </doc>
+ </method>
+ <method name="getUlimitMemoryCommand" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the Unix command for setting the maximum virtual memory available
+ to a given child process. This is only relevant when we are forking a
+ process from within the {@link org.apache.hadoop.mapred.Mapper} or the
+ {@link org.apache.hadoop.mapred.Reducer} implementations
+ e.g. <a href="{@docRoot}/org/apache/hadoop/mapred/pipes/package-summary.html">Hadoop Pipes</a>
+ or <a href="{@docRoot}/org/apache/hadoop/streaming/package-summary.html">Hadoop Streaming</a>.
+
+ It also checks that we are running on a *nix platform; otherwise
+ (e.g. on Cygwin/Windows) it returns <code>null</code>.
+ @param job job configuration
+ @return a <code>String[]</code> with the ulimit command arguments or
+ <code>null</code> if we are running on a non *nix platform or
+ if the limit is unspecified.]]>
+ </doc>
+ </method>
+ <method name="setEnvironment"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="env" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[set the environment for the command
+ @param env Mapping of environment variables]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[set the working directory
+ @param dir The directory where the command would be executed]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[check to see if a command needs to be executed and execute if needed]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return an array containing the command name & its parameters]]>
+ </doc>
+ </method>
+ <method name="parseExecResult"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the execution result]]>
+ </doc>
+ </method>
+ <method name="getProcess" return="java.lang.Process"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the current sub-process executing the given command
+ @return process executing the command]]>
+ </doc>
+ </method>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the exit code
+ @return the exit code of the process]]>
+ </doc>
+ </method>
+ <method name="execCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Static method to execute a shell command.
+ Covers most of the simple cases without requiring the user to implement
+ the <code>Shell</code> class.
+ @param cmd shell command to execute.
+ @return the output of the executed command.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USER_NAME_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's name]]>
+ </doc>
+ </field>
+ <field name="SET_PERMISSION_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set permission]]>
+ </doc>
+ </field>
+ <field name="SET_OWNER_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set owner]]>
+ </doc>
+ </field>
+ <field name="SET_GROUP_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WINDOWS" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set to true on Windows platforms]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A base class for running a Unix command.
+
+ <code>Shell</code> can be used to run unix commands like <code>du</code> or
+ <code>df</code>. It also offers facilities to gate commands by
+ time-intervals.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell -->
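+ <!-- Editorial sketch (not JDiff output): the static execCommand covers the
+      common one-shot case; the command below is illustrative.
+
+      String user = Shell.execCommand(new String[] { "whoami" }).trim();
+ -->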
+ <!-- start class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <class name="Shell.ExitCodeException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ExitCodeException" type="int, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is an IOException with exit code added.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <!-- start class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
+ <class name="Shell.ShellCommandExecutor" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File, java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the shell command.]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getOutput" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the output of the shell command.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple shell command executor.
+
+ <code>ShellCommandExecutor</code> should be used in cases where the output
+ of the command needs no explicit parsing and where the command, working
+ directory and environment remain unchanged. The output of the command
+ is stored as-is and is expected to be small.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
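+ <!-- Editorial sketch (not JDiff output): run a fixed command once and collect
+      its output. A non-zero exit status surfaces as an IOException
+      (see Shell.ExitCodeException above). The command is illustrative.
+
+      Shell.ShellCommandExecutor exec =
+          new Shell.ShellCommandExecutor(new String[] { "df", "-k", "/" });
+      exec.execute();
+      String output = exec.getOutput();
+ -->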
+ <!-- start class org.apache.hadoop.util.StringUtils -->
+ <class name="StringUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StringUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stringifyException" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Make a string representation of the exception.
+ @param e The exception to stringify
+ @return A string with exception name and call stack.]]>
+ </doc>
+ </method>
+ <method name="simpleHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fullHostname" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a full hostname, return the portion up to the first dot.
+ @param fullHostname the full hostname
+ @return the hostname to the first dot]]>
+ </doc>
+ </method>
+ <method name="humanReadableInt" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="number" type="long"/>
+ <doc>
+ <![CDATA[Given an integer, return a string in an approximate but
+ human-readable format.
+ It uses the bases 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3.
+ @param number the number to format
+ @return a human readable form of the integer]]>
+ </doc>
+ </method>
+ <method name="formatPercent" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="done" type="double"/>
+ <param name="digits" type="int"/>
+ <doc>
+ <![CDATA[Format a percentage for presentation to the user.
+ @param done the percentage to format (0.0 to 1.0)
+ @param digits the number of digits past the decimal point
+ @return a string representation of the percentage]]>
+ </doc>
+ </method>
+ <method name="arrayToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strs" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Given an array of strings, return a comma-separated list of its elements.
+ @param strs Array of strings
+ @return Empty string if strs.length is 0, comma separated list of strings
+ otherwise]]>
+ </doc>
+ </method>
+ <method name="byteToHexString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Convert an array of bytes to its hex string representation.
+ @param bytes the bytes to convert
+ @return hex string representation of the byte array]]>
+ </doc>
+ </method>
+ <method name="hexStringToByte" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a hex string, return the corresponding byte array.
+ @param hex the hex string
+ @return the byte array represented by the hex string; its size is
+ therefore hex.length/2]]>
+ </doc>
+ </method>
+ <method name="uriToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uris" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[Convert an array of URIs to a single string. @param uris the URIs to convert]]>
+ </doc>
+ </method>
+ <method name="stringToURI" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convert an array of strings to an array of URIs. @param str the strings to convert]]>
+ </doc>
+ </method>
+ <method name="stringToPath" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convert an array of strings to an array of Paths. @param str the strings to convert]]>
+ </doc>
+ </method>
+ <method name="formatTimeDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+ <![CDATA[Given a finish and start time in milliseconds, returns a
+ String in the format Xhrs, Ymins, Z sec for the time difference between the two.
+ If the finish time comes before the start time, then X, Y and Z will be negative.
+
+ @param finishTime finish time
+ @param startTime start time]]>
+ </doc>
+ </method>
+ <method name="getFormattedTimeWithDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dateFormat" type="java.text.DateFormat"/>
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+ <![CDATA[Formats time in ms and appends difference (finishTime - startTime)
+ as returned by formatTimeDiff().
+ If the finish time is 0, an empty string is returned; if the start time is 0,
+ the difference is not appended to the return value.
+ @param dateFormat date format to use
+ @param finishTime finish time
+ @param startTime start time
+ @return formatted value.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an array of strings.
+ @param str the comma separated string values
+ @return an array of the comma separated string values]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a collection of strings.
+ @param str comma separated string values
+ @return an <code>ArrayList</code> of string values]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Split a string using the default separator
+ @param str a string that may contain escaped separators
+ @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="separator" type="char"/>
+ <doc>
+ <![CDATA[Split a string using the given separator
+ @param str a string that may contain escaped separators
+ @param escapeChar a char used to escape the separator
+ @param separator a separator char
+ @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Escape commas in the string using the default escape char
+ @param str a string
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Escape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the char to be escaped
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Unescape commas in the string using the default escape char
+ @param str a string
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Unescape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the escaped char
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="getHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the hostname without throwing an exception.
+ @return hostname]]>
+ </doc>
+ </method>
+ <method name="startupShutdownMessage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <param name="args" type="java.lang.String[]"/>
+ <param name="LOG" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Print a log message for starting up and shutting down
+ @param clazz the class of the server
+ @param args arguments
+ @param LOG the target log object]]>
+ </doc>
+ </method>
+ <field name="COMMA" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ESCAPE_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[General string utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.StringUtils -->
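+ <!-- Editorial sketch (not JDiff output): the formatting helpers in action.
+      The exact output strings are indicative, not guaranteed.
+
+      String size = StringUtils.humanReadableInt(1536L * 1024);  // e.g. "1.5m"
+      String pct  = StringUtils.formatPercent(0.5, 1);           // e.g. "50.0%"
+      String diff = StringUtils.formatTimeDiff(125000L, 0L);     // e.g. "2mins, 5sec"
+ -->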
+ <!-- start interface org.apache.hadoop.util.Tool -->
+ <interface name="Tool" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Execute the command with the given arguments.
+
+ @param args command specific arguments.
+ @return exit code.
+ @throws Exception]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A tool interface that supports handling of generic command-line options.
+
+ <p><code>Tool</code> is the standard for any Map-Reduce tool/application.
+ The tool/application should delegate the handling of
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ standard command-line options</a> to {@link ToolRunner#run(Tool, String[])}
+ and only handle its custom arguments.</p>
+
+ <p>Here is how a typical <code>Tool</code> is implemented:</p>
+ <p><blockquote><pre>
+ public class MyApp extends Configured implements Tool {
+
+ public int run(String[] args) throws Exception {
+ // <code>Configuration</code> processed by <code>ToolRunner</code>
+ Configuration conf = getConf();
+
+ // Create a JobConf using the processed <code>conf</code>
+ JobConf job = new JobConf(conf, MyApp.class);
+
+ // Process custom command-line options
+ Path in = new Path(args[1]);
+ Path out = new Path(args[2]);
+
+ // Specify various job-specific parameters
+ job.setJobName("my-app");
+ job.setInputPath(in);
+ job.setOutputPath(out);
+ job.setMapperClass(MyApp.MyMapper.class);
+ job.setReducerClass(MyApp.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ return 0;
+ }
+
+ public static void main(String[] args) throws Exception {
+ // Let <code>ToolRunner</code> handle generic command-line options
+ int res = ToolRunner.run(new Configuration(), new MyApp(), args);
+
+ System.exit(res);
+ }
+ }
+ </pre></blockquote></p>
+
+ @see GenericOptionsParser
+ @see ToolRunner]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Tool -->
+ <!-- start class org.apache.hadoop.util.ToolRunner -->
+ <class name="ToolRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ToolRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the given <code>Tool</code> by {@link Tool#run(String[])}, after
+ parsing with the given generic arguments. Uses the given
+ <code>Configuration</code>, or builds one if null.
+
+ Sets the <code>Tool</code>'s configuration with the possibly modified
+ version of the <code>conf</code>.
+
+ @param conf <code>Configuration</code> for the <code>Tool</code>.
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the <code>Tool</code> with its <code>Configuration</code>.
+
+ Equivalent to <code>run(tool.getConf(), tool, args)</code>.
+
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Prints generic command-line arguments and usage information.
+
+ @param out stream to write usage information to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility to help run {@link Tool}s.
+
+ <p><code>ToolRunner</code> can be used to run classes implementing
+ <code>Tool</code> interface. It works in conjunction with
+ {@link GenericOptionsParser} to parse the
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ generic hadoop command line arguments</a> and modifies the
+ <code>Configuration</code> of the <code>Tool</code>. The
+ application-specific options are passed along without being modified.
+ </p>
+
+ @see Tool
+ @see GenericOptionsParser]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ToolRunner -->
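+ <!-- Editorial sketch (not JDiff output): a main() that defers generic option
+      parsing to ToolRunner. MyApp is the hypothetical Tool from the example in
+      the Tool interface documentation above.
+
+      public static void main(String[] args) throws Exception {
+        int exitCode = ToolRunner.run(new Configuration(), new MyApp(), args);
+        System.exit(exitCode);
+      }
+ -->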
+ <!-- start class org.apache.hadoop.util.VersionInfo -->
+ <class name="VersionInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Hadoop version.
+ @return the Hadoop version string, e.g. "0.6.3-dev"]]>
+ </doc>
+ </method>
+ <method name="getRevision" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the subversion revision number for the root directory.
+ @return the revision number, e.g. "451451"]]>
+ </doc>
+ </method>
+ <method name="getDate" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The date that Hadoop was compiled.
+ @return the compilation date in unix date format]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The user that compiled Hadoop.
+ @return the username of the user]]>
+ </doc>
+ </method>
+ <method name="getUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the subversion URL for the root Hadoop directory.]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the buildVersion which includes version,
+ revision, user and date.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[This class finds the package info for Hadoop and the HadoopVersionAnnotation
+ information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.VersionInfo -->
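+ <!-- Editorial sketch (not JDiff output): logging which Hadoop build is on the
+      classpath.
+
+      System.out.println("Hadoop " + VersionInfo.getVersion()
+          + " (r" + VersionInfo.getRevision() + ")");
+      System.out.println("Built by " + VersionInfo.getUser()
+          + " on " + VersionInfo.getDate());
+ -->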
+ <!-- start class org.apache.hadoop.util.XMLUtils -->
+ <class name="XMLUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="XMLUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="transform"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="styleSheet" type="java.io.InputStream"/>
+ <param name="xml" type="java.io.InputStream"/>
+ <param name="out" type="java.io.Writer"/>
+ <exception name="TransformerConfigurationException" type="javax.xml.transform.TransformerConfigurationException"/>
+ <exception name="TransformerException" type="javax.xml.transform.TransformerException"/>
+ <doc>
+ <![CDATA[Transform input xml given a stylesheet.
+
+ @param styleSheet the style-sheet
+ @param xml input xml data
+ @param out output
+ @throws TransformerConfigurationException
+ @throws TransformerException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[General xml utilities.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.XMLUtils -->
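+ <!-- Editorial sketch (not JDiff output): apply a stylesheet to a document and
+      write the result to stdout. File names are hypothetical.
+
+      java.io.InputStream xsl = new java.io.FileInputStream("report.xsl");
+      java.io.InputStream xml = new java.io.FileInputStream("data.xml");
+      java.io.Writer out = new java.io.OutputStreamWriter(System.out);
+      XMLUtils.transform(xsl, xml, out);
+      out.flush();
+ -->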
+</package>
+
+</api>
diff --git a/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.3.xml b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.3.xml
new file mode 100644
index 0000000000..564916fef7
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.18.3.xml
@@ -0,0 +1,38826 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Thu Jan 22 23:06:56 UTC 2009 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="hadoop 0.18.3"
+ jdversion="1.1.1">
+
+<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/hadoopqa/tools/jdiff/latest/jdiff.jar:/home/hadoopqa/tools/jdiff/latest/xerces.jar -classpath /home/ndaley/hadoop/branch-0.18/build/classes:/home/ndaley/hadoop/branch-0.18/lib/commons-cli-2.0-SNAPSHOT.jar:/home/ndaley/hadoop/branch-0.18/lib/commons-codec-1.3.jar:/home/ndaley/hadoop/branch-0.18/lib/commons-httpclient-3.0.1.jar:/home/ndaley/hadoop/branch-0.18/lib/commons-logging-1.0.4.jar:/home/ndaley/hadoop/branch-0.18/lib/commons-logging-api-1.0.4.jar:/home/ndaley/hadoop/branch-0.18/lib/commons-net-1.4.1.jar:/home/ndaley/hadoop/branch-0.18/lib/jets3t-0.6.0.jar:/home/ndaley/hadoop/branch-0.18/lib/jetty-5.1.4.jar:/home/ndaley/hadoop/branch-0.18/lib/jetty-ext/commons-el.jar:/home/ndaley/hadoop/branch-0.18/lib/jetty-ext/jasper-compiler.jar:/home/ndaley/hadoop/branch-0.18/lib/jetty-ext/jasper-runtime.jar:/home/ndaley/hadoop/branch-0.18/lib/jetty-ext/jsp-api.jar:/home/ndaley/hadoop/branch-0.18/lib/junit-3.8.1.jar:/home/ndaley/hadoop/branch-0.18/lib/kfs-0.1.3.jar:/home/ndaley/hadoop/branch-0.18/lib/log4j-1.2.15.jar:/home/ndaley/hadoop/branch-0.18/lib/oro-2.0.8.jar:/home/ndaley/hadoop/branch-0.18/lib/servlet-api.jar:/home/ndaley/hadoop/branch-0.18/lib/slf4j-api-1.4.3.jar:/home/ndaley/hadoop/branch-0.18/lib/slf4j-log4j12-1.4.3.jar:/home/ndaley/hadoop/branch-0.18/lib/xmlenc-0.52.jar:/home/ndaley/hadoop/branch-0.18/conf:/home/ndaley/tools/ant/latest/lib/ant-launcher.jar:/home/ndaley/tools/ant/latest/lib/ant-antlr.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-bcel.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-bsf.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-log4j.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-oro.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-regexp.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-resolver.jar:/home/ndaley/tools/ant/latest/lib/ant-commons-logging.jar:/home/ndaley/tools/ant/latest/lib/ant-commons-net.jar:/home/ndaley/tools/ant/latest/lib/ant-jai.jar:/home/ndaley/tools/ant/latest/lib/ant-javamail.jar:/home/ndaley/tools/ant/latest/lib/ant-jdepend.jar:/home/ndaley/tools/ant/latest/lib/ant-jmf.jar:/home/ndaley/tools/ant/latest/lib/ant-jsch.jar:/home/ndaley/tools/ant/latest/lib/ant-junit.jar:/home/ndaley/tools/ant/latest/lib/ant-netrexx.jar:/home/ndaley/tools/ant/latest/lib/ant-nodeps.jar:/home/ndaley/tools/ant/latest/lib/ant-starteam.jar:/home/ndaley/tools/ant/latest/lib/ant-stylebook.jar:/home/ndaley/tools/ant/latest/lib/ant-swing.jar:/home/ndaley/tools/ant/latest/lib/ant-testutil.jar:/home/ndaley/tools/ant/latest/lib/ant-trax.jar:/home/ndaley/tools/ant/latest/lib/ant-weblogic.jar:/home/ndaley/tools/ant/latest/lib/ant.jar:/home/ndaley/tools/ant/latest/lib/xercesImpl.jar:/home/ndaley/tools/ant/latest/lib/xml-apis.jar:/home/hadoopqa/tools/java/jdk1.5.0_11-32bit/lib/tools.jar -sourcepath /home/ndaley/hadoop/branch-0.18/src/core:/home/ndaley/hadoop/branch-0.18/src/mapred:/home/ndaley/hadoop/branch-0.18/src/tools -apidir /home/ndaley/hadoop/branch-0.18/docs/jdiff -apiname hadoop 0.18.3 -->
+<package name="org.apache.hadoop">
+ <!-- start class org.apache.hadoop.HadoopVersionAnnotation -->
+ <class name="HadoopVersionAnnotation" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.annotation.Annotation"/>
+ <doc>
+ <![CDATA[A package attribute that captures the version of Hadoop that was compiled.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.HadoopVersionAnnotation -->
+</package>
+<package name="org.apache.hadoop.conf">
+ <!-- start interface org.apache.hadoop.conf.Configurable -->
+ <interface name="Configurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration to be used by this object.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the configuration used by this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.conf.Configurable -->
+ <!-- start class org.apache.hadoop.conf.Configuration -->
+ <class name="Configuration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"/>
+ <constructor name="Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration with the same settings cloned from another.
+
+ @param other the configuration from which to clone settings.]]>
+ </doc>
+ </constructor>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param name resource to be added, the classpath is examined for a file
+ with that name.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="url" type="java.net.URL"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param url url of the resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param file file-path of resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, <code>null</code> if
+ no such property exists.
+
+ Values are processed for <a href="#VariableExpansion">variable expansion</a>
+ before being returned.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="getRaw" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, without doing
+ <a href="#VariableExpansion">variable expansion</a>.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the <code>value</code> of the <code>name</code> property.
+
+ @param name property name.
+ @param value property value.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property. If no such property
+ exists, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value, or <code>defaultValue</code> if the property
+ doesn't exist.]]>
+ </doc>
+ </method>
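+ <!-- Editorial sketch (not JDiff output): basic get/set round trip. The
+      property names are hypothetical.
+
+      Configuration conf = new Configuration();
+      conf.set("my.example.key", "hello");
+      String v1 = conf.get("my.example.key");          // "hello"
+      String v2 = conf.get("missing.key", "default");  // "default"
+ -->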
+ <method name="getInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="int"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>.
+
+ If no such property exists, or if the specified value is not a valid
+ <code>int</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as an <code>int</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>.
+
+ @param name property name.
+ @param value <code>int</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="long"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>long</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>long</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>.
+
+ @param name property name.
+ @param value <code>long</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="float"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>float</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>float</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getBoolean" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="boolean"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>boolean</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>boolean</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setBoolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>.
+
+ @param name property name.
+ @param value <code>boolean</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Parse the given attribute as a set of integer ranges
+ @param name the attribute name
+ @param defaultValue the default value if it is not set
+ @return a new set of ranges from the configured value]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ a collection of <code>String</code>s.
+ If no such property is specified, then an empty collection is returned.
+ <p>
+ This is an optimized version of {@link #getStrings(String)}
+
+ @param name property name.
+ @return property value as a collection of <code>String</code>s.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then <code>null</code> is returned.
+
+ @param name property name.
+ @return property value as an array of <code>String</code>s,
+ or <code>null</code>.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then default value is returned.
+
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of <code>String</code>s,
+ or default value.]]>
+ </doc>
+ </method>
+ <method name="setStrings"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="values" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Set the array of string values for the <code>name</code> property as
+ comma delimited values.
+
+ @param name property name.
+ @param values The values]]>
+ </doc>
+ </method>
+ <method name="getClassByName" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Load a class by name.
+
+ @param name the class name.
+ @return the class object.
+ @throws ClassNotFoundException if the class is not found.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;? extends U&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends U&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;U&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>
+ implementing the interface specified by <code>xface</code>.
+
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ An exception is thrown if the returned class does not implement the named
+ interface.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @param xface the interface implemented by the named class.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to the name of
+ <code>theClass</code>, which must implement the given interface <code>xface</code>.
+
+ An exception is thrown if <code>theClass</code> does not implement the
+ interface <code>xface</code>.
+
+ @param name property name.
+ @param theClass property value.
+ @param xface the interface implemented by the named class.]]>
+ </doc>
+ </method>
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getResource" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the {@link URL} for the named resource.
+
+ @param name resource name.
+ @return the url for the named resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get an input stream attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return an input stream attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsReader" return="java.io.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a {@link Reader} attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return a reader attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code>
+ key-value pairs in the configuration.
+
+ @return an iterator over the entries.]]>
+ </doc>
+ </method>
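+      <!-- Illustrative sketch, not part of the generated API record: iterating
+           over all key-value pairs via the iterator method above (assumes
+           java.util.Iterator and java.util.Map are imported).
+
+             Iterator<Map.Entry<String, String>> it = conf.iterator();
+             while (it.hasNext()) {
+               Map.Entry<String, String> entry = it.next();
+               System.out.println(entry.getKey() + "=" + entry.getValue());
+             }
+      -->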
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Write out the non-default properties in this configuration to the given
+ {@link OutputStream}.
+
+ @param out the output stream to write to.]]>
+ </doc>
+ </method>
+ <method name="getClassLoader" return="java.lang.ClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link ClassLoader} for this job.
+
+ @return the correct class loader.]]>
+ </doc>
+ </method>
+ <method name="setClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="classLoader" type="java.lang.ClassLoader"/>
+ <doc>
+ <![CDATA[Set the class loader that will be used to load the various objects.
+
+ @param classLoader the new class loader.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setQuietMode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="quietmode" type="boolean"/>
+ <doc>
+        <![CDATA[Set the quiet-mode.
+
+ In quiet-mode, error and informational messages might not be logged.
+
+ @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
+ to turn it off.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[For debugging. List non-default properties to the terminal and exit.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides access to configuration parameters.
+
+ <h4 id="Resources">Resources</h4>
+
+ <p>Configurations are specified by resources. A resource contains a set of
+ name/value pairs as XML data. Each resource is named by either a
+ <code>String</code> or by a {@link Path}. If named by a <code>String</code>,
+ then the classpath is examined for a file with that name. If named by a
+ <code>Path</code>, then the local filesystem is examined directly, without
+ referring to the classpath.
+
+ <p>Hadoop by default specifies two resources, loaded in-order from the
+ classpath: <ol>
+ <li><tt><a href="{@docRoot}/../hadoop-default.html">hadoop-default.xml</a>
+ </tt>: Read-only defaults for hadoop.</li>
+ <li><tt>hadoop-site.xml</tt>: Site-specific configuration for a given hadoop
+ installation.</li>
+ </ol>
+ Applications may add additional resources, which are loaded
+ subsequent to these resources in the order they are added.
+
+ <h4 id="FinalParams">Final Parameters</h4>
+
+ <p>Configuration parameters may be declared <i>final</i>.
+ Once a resource declares a value final, no subsequently-loaded
+ resource can alter that value.
+ For example, one might define a final parameter with:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;dfs.client.buffer.dir&lt;/name&gt;
+ &lt;value&gt;/tmp/hadoop/dfs/client&lt;/value&gt;
+ <b>&lt;final&gt;true&lt;/final&gt;</b>
+ &lt;/property&gt;</pre></tt>
+
+ Administrators typically define parameters as final in
+ <tt>hadoop-site.xml</tt> for values that user applications may not alter.
+
+ <h4 id="VariableExpansion">Variable Expansion</h4>
+
+ <p>Value strings are first processed for <i>variable expansion</i>. The
+ available properties are:<ol>
+ <li>Other properties defined in this Configuration; and, if a name is
+ undefined here,</li>
+ <li>Properties in {@link System#getProperties()}.</li>
+ </ol>
+
+ <p>For example, if a configuration resource contains the following property
+ definitions:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;basedir&lt;/name&gt;
+ &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
+ &lt;/property&gt;
+
+ &lt;property&gt;
+ &lt;name&gt;tempdir&lt;/name&gt;
+ &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt;
+ &lt;/property&gt;</pre></tt>
+
+ When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
+ will be resolved to another property in this Configuration, while
+ <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
+ of the System property with that name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration -->
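+  <!-- Illustrative sketch, not part of the generated API record: the variable
+       expansion described in the class javadoc above, reusing its
+       basedir/tempdir property names.
+
+         Configuration conf = new Configuration();
+         conf.set("basedir", "/user/${user.name}");
+         conf.set("tempdir", "${basedir}/tmp");
+         // get() expands ${basedir} against this Configuration, then ${user.name}
+         // against the System properties, e.g. "/user/alice/tmp".
+         String tempDir = conf.get("tempdir");
+  -->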
+ <!-- start class org.apache.hadoop.conf.Configuration.IntegerRanges -->
+ <class name="Configuration.IntegerRanges" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Configuration.IntegerRanges"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Configuration.IntegerRanges" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isIncluded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+        <![CDATA[Is the given value in the set of ranges?
+ @param value the value to check
+ @return is the value in the ranges?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A class that represents a set of positive integer ranges. It parses
+ strings of the form: "2-3,5,7-" where ranges are separated by comma and
+ the lower/upper bounds are separated by dash. Either the lower or upper
+ bound may be omitted, meaning all values up to or above the remaining bound. So the string
+ above means 2, 3, 5, and 7, 8, 9, ...]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration.IntegerRanges -->
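+  <!-- Illustrative sketch, not part of the generated API record: the range
+       syntax described in the class javadoc above.
+
+         Configuration.IntegerRanges r = new Configuration.IntegerRanges("2-3,5,7-");
+         r.isIncluded(3);   // true  (inside 2-3)
+         r.isIncluded(4);   // false (not in any range)
+         r.isIncluded(99);  // true  (7- has no upper bound)
+  -->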
+ <!-- start class org.apache.hadoop.conf.Configured -->
+ <class name="Configured" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Configured"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configured" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configured -->
+</package>
+<package name="org.apache.hadoop.filecache">
+ <!-- start class org.apache.hadoop.filecache.DistributedCache -->
+ <class name="DistributedCache" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedCache"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Get the locally cached file or archive; it is either returned from a
+ previous (still valid) localization, or copied from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache directory where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred is
+ returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Get the locally cached file or archive; it is either returned from a
+ previous (still valid) localization, or copied from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache directory where you want to localize the files/archives
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred
+ is returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="releaseCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[This is the opposite of getLocalCache. When you are done
+ using the cache, you need to release it.
+ @param cache The cache URI to be released
+ @param conf configuration which contains the filesystem the cache
+ is contained in.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeRelative" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTimestamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="cache" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns mtime of a given cache file on hdfs.
+ @param conf configuration
+ @param cache cache file
+ @return mtime of a given cache file on hdfs
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createAllSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="jobCacheDir" type="java.io.File"/>
+ <param name="workDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[This method creates symlinks for all files in a given directory in another directory
+ @param conf the configuration
+ @param jobCacheDir the target directory for creating symlinks
+ @param workDir the directory in which the symlinks are created
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setCacheArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archives" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of archives
+ @param archives The list of archives that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="setCacheFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of files
+ @param files The list of files that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="getCacheArchives" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache archives set in the Configuration
+ @param conf The configuration which contains the archives
+ @return A URI array of the caches set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCacheFiles" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache files set in the Configuration
+ @param conf The configuration which contains the files
+ @return A URI array of the files set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheArchives" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized caches
+ @param conf Configuration that contains the localized archives
+ @return A path array of localized caches
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized files
+ @param conf Configuration that contains the localized files
+ @return A path array of localized files
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getArchiveTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the archives
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFileTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the files
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setArchiveTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamp of the archives to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of archives.
+ The order should be the same as the order in which the archives are added.]]>
+ </doc>
+ </method>
+ <method name="setFileTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamp of the files to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of files.
+ The order should be the same as the order in which the files are added.]]>
+ </doc>
+ </method>
+ <method name="setLocalArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized archives
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+ </doc>
+ </method>
+ <method name="setLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized files
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+ </doc>
+ </method>
+ <method name="addCacheArchive"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+        <![CDATA[Add an archive to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addCacheFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add a file to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addFileToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Add a file path to the current set of classpath entries. It adds the file
+ to the cache as well.
+
+ @param file Path of the file to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getFileClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the file entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="addArchiveToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archive" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an archive path to the current set of classpath entries. It adds the
+ archive to cache as well.
+
+ @param archive Path of the archive to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getArchiveClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the archive entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method allows you to create symlinks in the current working directory
+ of the task to all the cache files/archives
+ @param conf the jobconf]]>
+ </doc>
+ </method>
+ <method name="getSymlink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+        <![CDATA[This method checks to see if symlinks are to be created for the
+ localized cache files in the current working directory
+ @param conf the jobconf
+ @return true if symlinks are to be created, else false]]>
+ </doc>
+ </method>
+ <method name="checkURIs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uriFiles" type="java.net.URI[]"/>
+ <param name="uriArchives" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[This method checks if there is a conflict in the fragment names
+ of the URIs. It also makes sure that each URI has a fragment. It
+ is only to be called if you want to create symlinks for
+ the various archives and files.
+ @param uriFiles The URI array of cache files
+ @param uriArchives The URI array of cache archives]]>
+ </doc>
+ </method>
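+      <!-- Illustrative sketch, not part of the generated API record: the
+           checkURIs/createSymlink pair, assuming a hypothetical namenode host
+           and paths; in real code new URI(String) throws URISyntaxException.
+
+             // Each URI carries a #fragment naming the symlink to create in the
+             // task's working directory.
+             URI[] files = { new URI("hdfs://namenode:8020/myapp/lookup.dat#lookup.dat") };
+             URI[] archives = { new URI("hdfs://namenode:8020/myapp/map.zip#map") };
+             if (DistributedCache.checkURIs(files, archives)) {
+               DistributedCache.createSymlink(conf);
+             }
+      -->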
+ <method name="purgeCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clear the entire contents of the cache and delete the backing files. This
+ should only be used when the server is reinitializing, because the users
+ are going to lose their files.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Distribute application-specific large, read-only files efficiently.
+
+ <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ framework to cache files (text, archives, jars etc.) needed by applications.
+ </p>
+
+ <p>Applications specify the files to be cached via URLs (hdfs:// or http://)
+ in the {@link JobConf}. The <code>DistributedCache</code> assumes that the
+ files specified via hdfs:// URLs are already present on the
+ {@link FileSystem} at the path specified by the URL.</p>
+
+ <p>The framework will copy the necessary files on to the slave node before
+ any tasks for the job are executed on that node. Its efficiency stems from
+ the fact that the files are only copied once per job and the ability to
+ cache archives which are un-archived on the slaves.</p>
+
+ <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ data/text files and/or more complex types such as archives, jars etc.
+ Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes.
+ Jars may be optionally added to the classpath of the tasks, a rudimentary
+ software distribution mechanism. Files have execution permissions.
+ Optionally users can also direct it to symlink the distributed cache file(s)
+ into the working directory of the task.</p>
+
+ <p><code>DistributedCache</code> tracks modification timestamps of the cache
+ files. Clearly the cache files should not be modified by the application
+ or externally while the job is executing.</p>
+
+ <p>Here is an illustrative example on how to use the
+ <code>DistributedCache</code>:</p>
+ <p><blockquote><pre>
+ // Setting up the cache for the application
+
+ 1. Copy the requisite files to the <code>FileSystem</code>:
+
+ $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
+ $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
+ $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+ $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
+ $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
+ $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
+
+ 2. Setup the application's <code>JobConf</code>:
+
+ JobConf job = new JobConf();
+ DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
+ job);
+     DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
+     DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar"), job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz"), job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz"), job);
+
+ 3. Use the cached files in the {@link Mapper} or {@link Reducer}:
+
+ public static class MapClass extends MapReduceBase
+ implements Mapper&lt;K, V, K, V&gt; {
+
+ private Path[] localArchives;
+ private Path[] localFiles;
+
+ public void configure(JobConf job) {
+ // Get the cached archives/files
+ localArchives = DistributedCache.getLocalCacheArchives(job);
+ localFiles = DistributedCache.getLocalCacheFiles(job);
+ }
+
+ public void map(K key, V value,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Use data from the cached archives/files here
+ // ...
+ // ...
+ output.collect(k, v);
+ }
+ }
+
+ </pre></blockquote></p>
+
+ @see JobConf
+ @see JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.filecache.DistributedCache -->
+</package>
+<package name="org.apache.hadoop.fs">
+ <!-- start class org.apache.hadoop.fs.BlockLocation -->
+ <class name="BlockLocation" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlockLocation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Constructor with hosts, names, offset and length]]>
+ </doc>
+ </constructor>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hosts (hostname) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of names (hostname:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the length of the block]]>
+ </doc>
+ </method>
+ <method name="setOffset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <doc>
+ <![CDATA[Set the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="setLength"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="length" type="long"/>
+ <doc>
+ <![CDATA[Set the length of block]]>
+ </doc>
+ </method>
+ <method name="setHosts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hosts" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the hosts hosting this block]]>
+ </doc>
+ </method>
+ <method name="setNames"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the names (host:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement write of Writable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement readFields of Writable]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BlockLocation -->
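+  <!-- Illustrative sketch, not part of the generated API record: reading
+       BlockLocations for a file. It assumes the enclosing FileSystem exposes
+       getFileBlockLocations(FileStatus, long, long); the path is hypothetical.
+
+         FileSystem fs = FileSystem.get(conf);
+         FileStatus status = fs.getFileStatus(new Path("/data/big.log"));
+         BlockLocation[] blocks =
+             fs.getFileBlockLocations(status, 0, status.getLen());
+         for (BlockLocation block : blocks) {
+           // Offset/length locate the block within the file; getHosts() lists
+           // the datanodes holding a replica.
+           System.out.println(block.getOffset() + "+" + block.getLength());
+         }
+  -->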
+ <!-- start class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <class name="BufferedFSInputStream" extends="java.io.BufferedInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="BufferedFSInputStream" type="org.apache.hadoop.fs.FSInputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a <code>BufferedFSInputStream</code>
+ with the specified buffer size,
+ and saves its argument, the input stream
+ <code>in</code>, for later use. An internal
+ buffer array of length <code>size</code>
+ is created and stored in <code>buf</code>.
+
+ @param in the underlying input stream.
+ @param size the buffer size.
+ @exception IllegalArgumentException if size <= 0.]]>
+ </doc>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+        <![CDATA[A class that optimizes reading from FSInputStream by buffering]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <!-- start class org.apache.hadoop.fs.ChecksumException -->
+ <class name="ChecksumException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumException" type="java.lang.String, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Thrown for checksum errors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumException -->
+ <!-- start class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <class name="ChecksumFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getApproxChkSumLength" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getRawFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Get the raw file system.]]>
+ </doc>
+ </method>
+ <method name="getChecksumFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return the name of the checksum file associated with a file.]]>
+ </doc>
+ </method>
+ <method name="isChecksumFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return true iff file is a checksum file name.]]>
+ </doc>
+ </method>
+ <method name="getChecksumFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileSize" type="long"/>
+ <doc>
+ <![CDATA[Return the length of the checksum file given the size of the
+ actual file.]]>
+ </doc>
+ </method>
+ <method name="getBytesPerSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the bytes Per Checksum]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getChecksumLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ <param name="bytesPerSum" type="int"/>
+ <doc>
+        <![CDATA[Calculate the length of the checksum file in bytes.
+ @param size the length of the data file in bytes
+ @param bytesPerSum the number of bytes in a checksum block
+ @return the number of bytes in the checksum file]]>
+ </doc>
+ </method>
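+      <!-- Illustrative sketch, not part of the generated API record: a worked
+           use of getChecksumLength. It assumes the usual 4-byte CRC per
+           checksum block plus a small fixed header; the exact header size is
+           an implementation detail.
+
+             // 1,000,000 data bytes at 512 bytes per checksum block:
+             // ceil(1000000 / 512) = 1954 blocks, 1954 * 4 = 7816 bytes of CRCs,
+             // plus the header.
+             long len = ChecksumFileSystem.getChecksumLength(1000000L, 512);
+      -->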
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename files/dirs]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement the delete(Path, boolean) in checksum
+ file system.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="copyCrc" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ If src and dst are directories, the copyCrc parameter
+ determines whether to copy CRC files.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Report a checksum error to the file system.
+ @param f the file name containing the error
+ @param in the stream open on the file
+ @param inPos the position of the beginning of the bad data in the file
+ @param sums the stream open on the checksum file
+ @param sumsPos the position of the beginning of the bad data in the checksum file
+ @return if retry is necessary]]>
+ </doc>
+ </method>
+ <doc>
+        <![CDATA[Abstract checksummed FileSystem.
+ It provides a basic implementation of a checksummed FileSystem,
+ which creates a checksum file for each raw file.
+ It generates & verifies checksums at the client side.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <!-- start class org.apache.hadoop.fs.ContentSummary -->
+ <class name="ContentSummary" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ContentSummary"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the length]]>
+ </doc>
+ </method>
+ <method name="getDirectoryCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the directory count]]>
+ </doc>
+ </method>
+ <method name="getFileCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the file count]]>
+ </doc>
+ </method>
+ <method name="getQuota" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the directory quota]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the header of the output.
+ if qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the header of the output]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the string representation of the object in the output format.
+ if qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the string representation of the object]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store the summary of a content (a directory or a file).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ContentSummary -->
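+  <!-- Illustrative sketch, not part of the generated API record: obtaining a
+       ContentSummary. It assumes FileSystem exposes getContentSummary(Path);
+       the path is hypothetical.
+
+         FileSystem fs = FileSystem.get(conf);
+         ContentSummary summary = fs.getContentSummary(new Path("/user/alice"));
+         System.out.println(summary.getDirectoryCount() + " dirs, "
+             + summary.getFileCount() + " files, "
+             + summary.getLength() + " bytes");
+  -->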
+ <!-- start class org.apache.hadoop.fs.DF -->
+ <class name="DF" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DF" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="DF" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFilesystem" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAvailable" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPercentUsed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMount" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DF_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'df' program.
+ Tested on Linux, FreeBSD, Cygwin.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DF -->
+ <!-- start class org.apache.hadoop.fs.DU -->
+ <class name="DU" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DU" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param interval refresh the disk usage at this interval
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <constructor name="DU" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param conf configuration object
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <method name="decDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Decrease how much disk space we use.
+ @param value decrease by this value]]>
+ </doc>
+ </method>
+ <method name="incDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Increase how much disk space we use.
+ @param value increase by this value]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return disk space used
+ @throws IOException if the shell command fails]]>
+ </doc>
+ </method>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the path of which we're keeping track of disk usage]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Start the disk usage checking thread.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down the refreshing thread.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'du' program]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DU -->
+ <!-- start class org.apache.hadoop.fs.FileStatus -->
+ <class name="FileStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <constructor name="FileStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a directory?
+ @return true if this is a directory]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the block size of the file.
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replication factor of a file.
+ @return the replication factor of a file.]]>
+ </doc>
+ </method>
+ <method name="getModificationTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the modification time of the file.
+ @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get FsPermission associated with the file.
+ @return permission. If a filesystem does not have a notion of permissions
+ or if permissions could not be determined, then the default
+ permission equivalent of "rwxrwxrwx" is returned.]]>
+ </doc>
+ </method>
+ <method name="getOwner" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the owner of the file.
+ @return owner of the file. The string could be empty if there is no
+ notion of owner of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getGroup" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the group associated with the file.
+ @return group for the file. The string could be empty if there is no
+ notion of group of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Sets permission.
+ @param permission if permission is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="owner" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets owner.
+ @param owner if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setGroup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets group.
+ @param group if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare this object to another object
+
+ @param o the object to be compared.
+ @return a negative integer, zero, or a positive integer as this object
+ is less than, equal to, or greater than the specified object.
+
+ @throws ClassCastException if the specified object's is not of
+ type FileStatus]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare if this object is equal to another object
+ @param o the object to be compared.
+ @return true if two file status has the same path name; false if not.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for the object, which is defined as
+ the hash code of the path name.
+
+ @return a hash code value for the path name.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that represents the client side information for a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileStatus -->
+ <!-- start class org.apache.hadoop.fs.FileSystem -->
+ <class name="FileSystem" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="FileSystem"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseArgs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="i" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the cmd-line args, starting at i. Remove consumed args
+ from array. We expect param in the form:
+ '-local | -dfs <namenode:port>']]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the configured filesystem implementation.]]>
+ </doc>
+ </method>
+ <method name="getDefaultUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default filesystem URI from a configuration.
+ @param conf the configuration to access
+ @return the uri of the default filesystem]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.net.URI"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="getNamed" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="call #get(URI,Configuration) instead.">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated call #get(URI,Configuration) instead.]]>
+ </doc>
+ </method>
+ <method name="getLocal" return="org.apache.hadoop.fs.LocalFileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the local file syste
+ @param conf the configuration to configure the file system with
+ @return a LocalFileSystem]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the FileSystem for this URI's scheme and authority. The scheme
+ of the URI determines a configuration property name,
+ <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
+ The entire URI is passed to the FileSystem instance's initialize method.]]>
+ </doc>
+ </method>
+ <method name="closeAll"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all cached filesystems. Be sure those filesystems are not
+ used anymore.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[create a file with the provided permission
+ The permission of the file is set to be the provided permission as in
+ setPermission, not permission&~umask
+
+ It is implemented using two RPCs. It is understood that it is inefficient,
+ but the implementation is thread-safe. The other option is to change the
+ value of umask in configuration to be 0, but it is not thread-safe.
+
+ @param fs file system handle
+ @param file the name of the file to be created
+ @param permission the permission of the file
+ @return an output stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[create a directory with the provided permission
+ The permission of the directory is set to be the provided permission as in
+ setPermission, not permission&~umask
+
+ @see #create(FileSystem, Path, FsPermission)
+
+ @param fs file system handle
+ @param dir the name of the directory to be created
+ @param permission the permission of the directory
+ @return true if the directory creation succeeds; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getFileBlockLocations(FileStatus, long, long)}">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing hostnames, offset and size of
+ portions of the given file. For a nonexistent
+ file or regions, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The FileSystem will simply return an elt containing 'localhost'.
+ @deprecated use {@link #getFileBlockLocations(FileStatus, long, long)}]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing hostnames, offset and size of
+ portions of the given file. For a nonexistent
+ file or regions, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The FileSystem will simply return an elt containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file to open]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param permission
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize
+ @param progress
+ @throws IOException
+ @see #setPermission(Path, FsPermission)]]>
+ </doc>
+ </method>
+ <method name="createNewFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the given Path as a brand-new zero-length file. If
+ create fails, or if it already existed, return false.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, getConf().getInt("io.file.buffer.size", 4096), null)
+ @param f the existing file to be appended.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, bufferSize, null).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @param progress for reporting progress if it is not null.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get replication.
+
+ @deprecated Use getFileStatus() instead
+ @param src file name
+ @return file replication
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file.
+
+ @param f the path to delete.
+ @param recursive if the path is a directory and recursive is set to
+ true, the directory is deleted; otherwise an exception is thrown. For
+ a file, recursive can be set to either true or false.
+ @return true if the delete is successful, else false.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="deleteOnExit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a path to be deleted when FileSystem is closed.
+ When the JVM shuts down,
+ all FileSystem objects will be closed automatically.
+ Then,
+ the marked path will be deleted as a result of closing the FileSystem.
+
+ The path has to exist in the file system.
+
+ @param f the path to delete.
+ @return true if deleteOnExit is successful, otherwise false.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="processDeleteOnExit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete all files that were marked as delete-on-exit. This recursively
+ deletes all files in the specified paths.]]>
+ </doc>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if exists.
+ @param f source file]]>
+ </doc>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[True iff the named path is a regular file.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the {@link ContentSummary} of a given {@link Path}.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given path using the user-supplied path
+ filter.
+
+ @param f
+ a path name
+ @param filter
+ the user-supplied path filter
+ @return an array of FileStatus objects for the files under the given path
+ after applying the filter
+ @throws IOException
+ if encounter any problem while fetching the status]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using default
+ path filter.
+
+ @param files
+ a list of paths
+ @return a list of statuses for the files under the given paths after
+ applying the default path filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using user-supplied
+ path filter.
+
+ @param files
+ a list of paths
+ @param filter
+ the user-supplied path filter
+ @return a list of statuses for the files under the given paths after
+ applying the filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Return all the files that match filePattern and are not checksum
+ files. Results are sorted by their names.
+
+ <p>
+ A filename pattern is composed of <i>regular</i> characters and
+ <i>special pattern matching</i> characters, which are:
+
+ <dl>
+ <dd>
+ <dl>
+ <p>
+ <dt> <tt> ? </tt>
+ <dd> Matches any single character.
+
+ <p>
+ <dt> <tt> * </tt>
+ <dd> Matches zero or more characters.
+
+ <p>
+ <dt> <tt> [<i>abc</i>] </tt>
+ <dd> Matches a single character from character set
+ <tt>{<i>a,b,c</i>}</tt>.
+
+ <p>
+ <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
+ <dd> Matches a single character from the character range
+ <tt>{<i>a...b</i>}</tt>. Note that character <tt><i>a</i></tt> must be
+ lexicographically less than or equal to character <tt><i>b</i></tt>.
+
+ <p>
+ <dt> <tt> [^<i>a</i>] </tt>
+ <dd> Matches a single character that is not from character set or range
+ <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur
+ immediately to the right of the opening bracket.
+
+ <p>
+ <dt> <tt> \<i>c</i> </tt>
+ <dd> Removes (escapes) any special meaning of character <i>c</i>.
+
+ <p>
+ <dt> <tt> {ab,cd} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt>
+
+ <p>
+ <dt> <tt> {ab,c{de,fh}} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt>
+
+ </dl>
+ </dd>
+ </dl>
+
+ @param pathPattern a regular expression specifying a path pattern
+
+ @return an array of paths that match the path pattern
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array of FileStatus objects whose path names match pathPattern
+ and is accepted by the user-supplied path filter. Results are sorted by
+ their path names.
+ Return null if pathPattern has no glob and the path does not exist.
+ Return an empty array if pathPattern has a glob and no path matches it.
+
+ @param pathPattern
+ a regular expression specifying the path pattern
+ @param filter
+ a user-supplied path filter
+ @return an array of FileStatus objects
+ @throws IOException if any I/O error occurs when fetching file status]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the current user's home directory in this filesystem.
+ The default implementation returns "/user/$USER/".]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param new_dir]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #mkdirs(Path, FsPermission)} with default permission.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make the given file and all non-existent parents into
+ directories. Has the semantics of Unix 'mkdir -p'.
+ Existence of the directory hierarchy is not an error.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name and the source is kept intact afterwards]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files is on the local disk. Add it to FS at
+ the given dst name, removing the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name, removing the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="moveToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ Remove the source afterwards]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[No more filesystem operations are needed. Will
+ release any held locks.]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total size of all files in the filesystem.]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should be optimally
+ be split into to minimize i/o time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a file status object that represents the path.
+ @param f The path we want information from
+ @return a FileStatus object
+ @throws FileNotFoundException when the path does not exist;
+ IOException see specific implementation]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permission of a path.
+ @param p
+ @param permission]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param p The path
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.]]>
+ </doc>
+ </method>
+ <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.FileSystem&gt;"/>
+ <doc>
+ <![CDATA[Get the statistics for a particular file system
+ @param cls the class to lookup
+ @return a statistics object]]>
+ </doc>
+ </method>
+ <method name="printStatistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="statistics" type="org.apache.hadoop.fs.FileSystem.Statistics"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The statistics for this file system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An abstract base class for a fairly generic filesystem. It
+ may be implemented as a distributed filesystem, or as a "local"
+ one that reflects the locally-connected disk. The local version
+ exists for small Hadoop instances and for testing.
+
+ <p>
+
+ All user code that may potentially use the Hadoop Distributed
+ File System should be written to use a FileSystem object. The
+ Hadoop DFS is a multi-machine system that appears as a single
+ disk. It's useful because of its fault tolerance and potentially
+ very large capacity.
+
+ <p>
+ The local implementation is {@link LocalFileSystem} and the distributed
+ implementation is {@link DistributedFileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem -->
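+ <!-- A minimal, hand-written usage sketch for the FileSystem methods documented
+ above (illustrative only, not generated JDiff output; the path, owner and
+ permission values are hypothetical):
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.permission.FsPermission;
+
+ public class FileSystemSketch {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     FileSystem fs = FileSystem.get(conf);            // default filesystem from conf
+     Path p = new Path("/user/demo/data.txt");        // hypothetical path
+     FileStatus st = fs.getFileStatus(p);             // throws FileNotFoundException if absent
+     System.out.println(st.getLen() + " bytes, replication=" + st.getReplication());
+     fs.setPermission(p, new FsPermission((short) 0644));
+     fs.setOwner(p, "demo", null);                    // null groupname leaves the group unchanged
+   }
+ }
+ -->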
+ <!-- start class org.apache.hadoop.fs.FileSystem.Statistics -->
+ <class name="FileSystem.Statistics" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="incrementBytesRead"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes read in the statistics
+ @param newBytes the additional bytes read]]>
+ </doc>
+ </method>
+ <method name="incrementBytesWritten"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes written in the statistics
+ @param newBytes the additional bytes written]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes read
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes written
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem.Statistics -->
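+ <!-- A small sketch of reading the per-filesystem counters above (illustrative;
+ assumes the default filesystem has already served some I/O):
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+
+ public class StatisticsSketch {
+   public static void main(String[] args) throws Exception {
+     FileSystem fs = FileSystem.get(new Configuration());
+     FileSystem.Statistics stats = FileSystem.getStatistics(fs.getClass());
+     System.out.println("read=" + stats.getBytesRead()
+         + " written=" + stats.getBytesWritten());
+     FileSystem.printStatistics();   // dumps counters for all tracked filesystems
+   }
+ }
+ -->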
+ <!-- start class org.apache.hadoop.fs.FileUtil -->
+ <class name="FileUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <doc>
+ <![CDATA[convert an array of FileStatus to an array of Path
+
+ @param stats
+ an array of FileStatus objects
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus to an array of Path.
+ If stats is null, return an array containing only path.
+ @param stats
+ an array of FileStatus objects
+ @param path
+ default path to return if stats is null
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="fullyDelete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a directory and all its contents. If
+ this returns false, the directory may be partially deleted.]]>
+ </doc>
+ </method>
+ <method name="fullyDelete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recursively delete a directory.
+
+ @param fs {@link FileSystem} on which the path is present
+ @param dir directory to recursively delete
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copyMerge" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dstFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="addString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy all files in a directory to one output file (merge).]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy local files to a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="java.io.File"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy FileSystem files to local files.]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param filename The filename to convert
+ @return The unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @return The unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="getDU" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Takes an input dir and returns the disk usage (du) of that local directory.
+ Very basic implementation.
+
+ @param dir
+ the local input dir whose disk space is to be computed
+ @return The total disk space of the input local directory]]>
+ </doc>
+ </method>
+ <method name="unZip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="unzipDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a zip file as input, unzip it into the unzip directory
+ passed as the second parameter.
+ @param inFile The zip file as input
+ @param unzipDir The directory into which to unzip the zip file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unTar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="untarDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a tar file as input, untar it into the untar directory
+ passed as the second parameter.
+
+ This utility will untar ".tar" files as well as ".tar.gz" and ".tgz" files.
+
+ @param inFile The tar file as input.
+ @param untarDir The directory into which to untar the tar file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="symLink" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="linkname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a soft link between a src and destination
+ only on a local disk. HDFS does not support this
+ @param target the target for symlink
+ @param linkname the symlink
+ @return value returned by the command]]>
+ </doc>
+ </method>
+ <method name="chmod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="perm" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Change the permissions on a filename.
+ @param filename the name of the file to change
+ @param perm the permission string
+ @return the exit code from the command
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="createLocalTempFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="basefile" type="java.io.File"/>
+ <param name="prefix" type="java.lang.String"/>
+ <param name="isDeleteOnExit" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a tmp file for a base file.
+ @param basefile the base file of the tmp
+ @param prefix file name prefix of tmp
+ @param isDeleteOnExit if true, the tmp will be deleted when the VM exits
+ @return a newly created tmp file
+ @exception IOException If a tmp file cannot be created
+ @see java.io.File#createTempFile(String, String, File)
+ @see java.io.File#deleteOnExit()]]>
+ </doc>
+ </method>
+ <method name="replaceFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="target" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move the src file to the name specified by target.
+ @param src the source file
+ @param target the target file
+ @exception IOException If this operation fails]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of file-processing util methods]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil -->
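+ <!-- A hedged sketch of the static FileUtil.copy() overload documented above
+ (illustrative; the local and remote paths are hypothetical):
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.FileUtil;
+ import org.apache.hadoop.fs.Path;
+
+ public class CopySketch {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     FileSystem local = FileSystem.getLocal(conf);    // source: local disk
+     FileSystem dfs = FileSystem.get(conf);           // destination: default FS
+     boolean ok = FileUtil.copy(local, new Path("/tmp/in.txt"),
+                                dfs, new Path("/user/demo/in.txt"),
+                                false /* deleteSource */, conf);
+     System.out.println("copied: " + ok);
+   }
+ }
+ -->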
+ <!-- start class org.apache.hadoop.fs.FileUtil.HardLink -->
+ <class name="FileUtil.HardLink" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil.HardLink"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createHardLink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.io.File"/>
+ <param name="linkName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a hardlink]]>
+ </doc>
+ </method>
+ <method name="getLinkCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Retrieves the number of links to the specified file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Class for creating hardlinks.
+ Supports Unix, Cygwin, and Windows XP.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil.HardLink -->
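+ <!-- A sketch of the two HardLink helpers above (illustrative; the local-disk
+ paths are hypothetical and must reside on the same filesystem):
+
+ import java.io.File;
+ import org.apache.hadoop.fs.FileUtil;
+
+ public class HardLinkSketch {
+   public static void main(String[] args) throws Exception {
+     File target = new File("/tmp/original.txt");      // must already exist
+     File link = new File("/tmp/link.txt");
+     FileUtil.HardLink.createHardLink(target, link);
+     System.out.println("links: " + FileUtil.HardLink.getLinkCount(target));  // 2 afterwards
+   }
+ }
+ -->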
+ <!-- start class org.apache.hadoop.fs.FilterFileSystem -->
+ <class name="FilterFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FilterFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing the hostnames, offsets and sizes of
+ portions of the given file. For a nonexistent
+ file or region, null will be returned.
+
+ This call is most helpful with DFS, where it returns the
+ hostnames of machines that contain the given file.
+
+ The base FileSystem will simply return an element containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List files in a directory.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param newDir]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should optimally
+ be split into to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get file status.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A <code>FilterFileSystem</code> contains
+ some other file system, which it uses as
+ its basic file system, possibly transforming
+ the data along the way or providing additional
+ functionality. The class <code>FilterFileSystem</code>
+ itself simply overrides all methods of
+ <code>FileSystem</code> with versions that
+ pass all requests to the contained file
+ system. Subclasses of <code>FilterFileSystem</code>
+ may further override some of these methods
+ and may also provide additional methods
+ and fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FilterFileSystem -->
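+ <!-- A sketch of the extension pattern the FilterFileSystem class doc above
+ describes: wrap a contained filesystem and override only what you need (this
+ logging override is a hypothetical example, not part of Hadoop):
+
+ import java.io.IOException;
+ import org.apache.hadoop.fs.FSDataInputStream;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.FilterFileSystem;
+ import org.apache.hadoop.fs.Path;
+
+ public class LoggingFileSystem extends FilterFileSystem {
+   public LoggingFileSystem(FileSystem fs) {
+     super(fs);                                   // all calls forward to 'fs' by default
+   }
+   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+     System.err.println("open: " + f);            // extra behavior added by the subclass
+     return super.open(f, bufferSize);
+   }
+ }
+ -->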
+ <!-- start class org.apache.hadoop.fs.FSDataInputStream -->
+ <class name="FSDataInputStream" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSDataInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="desired" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
+ and buffers input through a {@link BufferedInputStream}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataInputStream -->
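+ <!-- A sketch contrasting the positioned reads and the seek/read calls above
+ (illustrative; the path is hypothetical):
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FSDataInputStream;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+
+ public class ReadSketch {
+   public static void main(String[] args) throws Exception {
+     FileSystem fs = FileSystem.get(new Configuration());
+     FSDataInputStream in = fs.open(new Path("/user/demo/data.txt"), 4096);
+     byte[] header = new byte[8];
+     in.readFully(0L, header);          // positioned read; current offset is unchanged
+     in.seek(128L);                     // subsequent in.read() calls start at byte 128
+     System.out.println("pos=" + in.getPos());
+     in.close();
+   }
+ }
+ -->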
+ <!-- start class org.apache.hadoop.fs.FSDataOutputStream -->
+ <class name="FSDataOutputStream" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Syncable"/>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWrappedStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps an {@link OutputStream} in a {@link DataOutputStream},
+ buffers output through a {@link BufferedOutputStream} and creates a checksum
+ file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataOutputStream -->
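+ <!-- A write-side sketch using the sync/getPos calls above (illustrative;
+ fs.create(Path) is the simple FileSystem overload, and the path is
+ hypothetical):
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+
+ public class WriteSketch {
+   public static void main(String[] args) throws Exception {
+     FileSystem fs = FileSystem.get(new Configuration());
+     FSDataOutputStream out = fs.create(new Path("/user/demo/out.txt"));
+     out.writeBytes("hello\n");         // inherited from java.io.DataOutputStream
+     out.sync();                        // push buffered data to the underlying store
+     System.out.println("pos=" + out.getPos());
+     out.close();
+   }
+ }
+ -->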
+ <!-- start class org.apache.hadoop.fs.FSError -->
+ <class name="FSError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Thrown for unexpected filesystem errors, presumed to reflect disk errors
+ in the native filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSError -->
+ <!-- start class org.apache.hadoop.fs.FSInputChecker -->
+ <class name="FSInputChecker" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs]]>
+ </doc>
+ </constructor>
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int, boolean, java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs
+ @param sum the type of Checksum engine
+ @param chunkSize maximum chunk size
+ @param checksumSize the number of bytes in each checksum]]>
+ </doc>
+ </constructor>
+ <method name="readChunk" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads in next checksum chunk data into <code>buf</code> at <code>offset</code>
+ and checksum into <code>checksum</code>.
+ The method is used for implementing read, therefore, it should be optimized
+ for sequential reading.
+ @param pos chunkPos
+ @param buf destination buffer
+ @param offset offset in buf at which to store data
+ @param len maximum number of bytes to read
+ @return number of bytes read]]>
+ </doc>
+ </method>
+ <method name="getChunkPosition" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <doc>
+ <![CDATA[Return position of beginning of chunk containing pos.
+
+ @param pos a position in the file
+ @return the starting position of the chunk which contains the byte]]>
+ </doc>
+ </method>
+ <method name="needChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if there is a need for checksum verification]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read one checksum-verified byte
+
+ @return the next byte of data, or <code>-1</code> if the end of the
+ stream is reached.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read checksum verified bytes from this byte-input stream into
+ the specified byte array, starting at the given offset.
+
+ <p> This method implements the general contract of the corresponding
+ <code>{@link InputStream#read(byte[], int, int) read}</code> method of
+ the <code>{@link InputStream}</code> class. As an additional
+ convenience, it attempts to read as many bytes as possible by repeatedly
+ invoking the <code>read</code> method of the underlying stream. This
+ iterated <code>read</code> continues until one of the following
+ conditions becomes true: <ul>
+
+ <li> The specified number of bytes have been read,
+
+ <li> The <code>read</code> method of the underlying stream returns
+ <code>-1</code>, indicating end-of-file.
+
+ </ul> If the first <code>read</code> on the underlying stream returns
+ <code>-1</code> to indicate end-of-file then this method returns
+ <code>-1</code>. Otherwise this method returns the number of bytes
+ actually read.
+
+ @param b destination buffer.
+ @param off offset at which to start storing bytes.
+ @param len maximum number of bytes to read.
+ @return the number of bytes read, or <code>-1</code> if the end of
+ the stream has been reached.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if any checksum error occurs]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over and discards <code>n</code> bytes of data from the
+ input stream.
+
+ <p>This method may skip more bytes than are remaining in the backing
+ file. This produces no exception and the number of bytes skipped
+ may include some number of bytes that were beyond the EOF of the
+ backing file. Attempting to read from the stream after skipping past
+ the end will result in -1 indicating the end of the file.
+
+<p>If <code>n</code> is negative, no bytes are skipped.
+
+ @param n the number of bytes to be skipped.
+ @return the actual number of bytes skipped.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to skip to is corrupted]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given position in the stream.
+ The next read() will be from that position.
+
+ <p>This method may seek past the end of the file.
+ This produces no exception and an attempt to read from
+ the stream will result in -1 indicating the end of the file.
+
+ @param pos the position to seek to.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to seek to is corrupted]]>
+ </doc>
+ </method>
+ <method name="readFully" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="stm" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A utility function that tries to read up to <code>len</code> bytes from
+ <code>stm</code>
+
+ @param stm an input stream
+ @param buf destination buffer
+ @param offset offset at which to store data
+ @param len number of bytes to read
+ @return actual number of bytes read
+ @throws IOException if there is any IO error]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="maxChunkSize" type="int"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Set the checksum related parameters
+ @param sum which type of checksum to use
+ @param maxChunkSize maximum chunk size
+ @param checksumSize checksum size]]>
+ </doc>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="readlimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="file" type="org.apache.hadoop.fs.Path"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file name from which data is read]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This is a generic input stream for verifying checksums for
+ data before it is read by a user.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputChecker -->
+ <!-- start class org.apache.hadoop.fs.FSInputStream -->
+ <class name="FSInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSInputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="seek"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks a different copy of the data. Returns true if
+ found a new source, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[FSInputStream is a generic InputStream with a little bit
+ of RandomAccessFile-style seek ability.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputStream -->
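+ <!-- A toy subclass showing the contract a concrete FSInputStream must
+ implement (a byte-array-backed stream is an assumption for illustration):
+
+ import org.apache.hadoop.fs.FSInputStream;
+
+ public class ByteArrayFSInputStream extends FSInputStream {
+   private final byte[] data;
+   private int pos;
+   public ByteArrayFSInputStream(byte[] data) { this.data = data; }
+   public void seek(long p) { pos = (int) p; }          // caller must not seek past EOF
+   public long getPos() { return pos; }
+   public boolean seekToNewSource(long targetPos) { return false; }  // no replicas here
+   public int read() { return pos < data.length ? (data[pos++] & 0xff) : -1; }
+ }
+ -->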
+ <!-- start class org.apache.hadoop.fs.FSOutputSummer -->
+ <class name="FSOutputSummer" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSOutputSummer" type="java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="writeChunk"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write one byte]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes <code>len</code> bytes from the specified byte array
+ starting at offset <code>off</code> and generates a checksum for
+ each data chunk.
+
+ <p> This method stores bytes from the given array into this
+ stream's buffer before it gets checksummed. The buffer gets checksummed
+ and flushed to the underlying output stream when all data
+ in a checksum chunk is in the buffer. If the buffer is empty and the
+ requested length is at least as large as the next checksum chunk
+ size, this method will checksum and write the chunk directly
+ to the underlying output stream. Thus it avoids unnecessary data copies.
+
+ @param b the data.
+ @param off the start offset in the data.
+ @param len the number of bytes to write.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This is a generic output stream for generating checksums for
+ data before it is written to the underlying stream]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSOutputSummer -->
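+ <!-- A minimal FSOutputSummer subclass sketch; it only forwards the data
+ chunks and drops the checksums, which a real implementation would persist
+ (the CRC32 engine and 512-byte chunk size are assumptions):
+
+ import java.io.IOException;
+ import java.io.OutputStream;
+ import java.util.zip.CRC32;
+ import org.apache.hadoop.fs.FSOutputSummer;
+
+ public class ForwardingSummer extends FSOutputSummer {
+   private final OutputStream out;
+   public ForwardingSummer(OutputStream out) {
+     super(new CRC32(), 512, 4);    // checksum engine, chunk size, checksum size in bytes
+     this.out = out;
+   }
+   protected void writeChunk(byte[] b, int offset, int len, byte[] checksum)
+       throws IOException {
+     out.write(b, offset, len);     // 'checksum' is ignored in this sketch
+   }
+ }
+ -->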
+ <!-- start class org.apache.hadoop.fs.FsShell -->
+ <class name="FsShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="FsShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current trash directory associated with this shell.]]>
+ </doc>
+ </method>
+ <method name="byteDesc" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="long"/>
+ <doc>
+ <![CDATA[Return an abbreviated English-language description of the byte length]]>
+ </doc>
+ </method>
+ <method name="limitDecimalTo2" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[The main() entry point for running the shell from the command line.]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dateForm" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="modifFmt" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provide command line access to a FileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsShell -->
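+ <!-- A sketch of driving FsShell programmatically via the Tool-style run()
+ method above; equivalent in spirit to "hadoop fs -ls /" on the command line:
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FsShell;
+
+ public class ShellSketch {
+   public static void main(String[] args) throws Exception {
+     FsShell shell = new FsShell(new Configuration());
+     int rc = shell.run(new String[] { "-ls", "/" });   // 0 on success
+     shell.close();
+     System.exit(rc);
+   }
+ }
+ -->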
+ <!-- start class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
+ <class name="FsUrlStreamHandlerFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.net.URLStreamHandlerFactory"/>
+ <constructor name="FsUrlStreamHandlerFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsUrlStreamHandlerFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createURLStreamHandler" return="java.net.URLStreamHandler"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Factory for URL stream handlers.
+
+ There is only one handler whose job is to create UrlConnections. A
+ FsUrlConnection relies on FileSystem to choose the appropriate FS
+ implementation.
+
+ Before returning our handler, we make sure that FileSystem knows an
+ implementation for the requested scheme/protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
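+ <!-- A sketch of installing the factory above so that java.net.URL understands
+ filesystem schemes (the hdfs host and path are hypothetical; note that a JVM
+ accepts only one URLStreamHandlerFactory for its lifetime):
+
+ import java.io.InputStream;
+ import java.net.URL;
+ import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
+
+ public class UrlSketch {
+   public static void main(String[] args) throws Exception {
+     URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
+     InputStream in = new URL("hdfs://namenode:8020/user/demo/data.txt").openStream();
+     in.close();
+   }
+ }
+ -->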
+ <!-- start class org.apache.hadoop.fs.HarFileSystem -->
+ <class name="HarFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HarFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Public constructor of HarFileSystem.]]>
+ </doc>
+ </constructor>
+ <constructor name="HarFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor to create a HarFileSystem with an
+ underlying filesystem.
+ @param fs]]>
+ </doc>
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize a Har filesystem per har archive. The
+ archive home directory is the top level directory
+ in the filesystem that contains the HAR archive.
+ Be careful with this method: you do not want to keep
+ creating new FileSystem instances per call to
+ path.getFileSystem().
+ The URI of a Har filesystem is either
+ har://underlyingfsscheme-host:port/archivepath
+ or
+ har:///archivepath; the latter uses the default
+ underlying filesystem when none is specified.]]>
+ </doc>
+ </method>
+ <method name="getHarVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the top-level archive path.]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the URI of this filesystem.
+ The URI is of the form
+ har://underlyingfsscheme-host:port/pathintheunderlyingfs]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get block locations from the underlying fs.
+ @param file the input filestatus to get block locations
+ @param start the start in the file
+ @param len the length in the file
+ @return block locations for this segment of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getHarHash" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[The hash of the path p inside
+ the filesystem
+ @param p the path in the har filesystem
+ @return the hash code of the path.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the filestatus of files in the har archive.
+ The permissions returned are those of the archive
+ index files. The permissions are not persisted
+ while creating a hadoop archive.
+ @param f the path in the har filesystem
+ @return filestatus.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a har input stream which fakes end of
+ file. It reads the index files to get the part
+ file name and the size and start of the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[listStatus returns the children of a directory
+ after looking up the index files.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the top level archive path.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies the file in the har filesystem to a local file.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permisssion" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <field name="VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is an implementation of the Hadoop Archive
+ Filesystem. This archive Filesystem has index files
+ of the form _index* and has contents of the form
+ part-*. The index files store the indexes of the
+ real files. The index files are named _masterindex
+ and _index. The master index is a level of indirection
+ into the index file to make lookups faster. The index
+ file is sorted by the hash codes of the paths it contains,
+ and the master index contains pointers to the positions in
+ the index for ranges of hash codes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.HarFileSystem -->
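+ <!-- Clients never read the _index or _masterindex files directly; lookups
+      happen inside the filesystem. A hedged sketch of listing an archive's
+      contents, reusing the hypothetical paths from the note above.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileStatus;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class HarList {
+        public static void main(String[] args) throws Exception {
+          Path archive = new Path("har://hdfs-nn:9000/user/foo/data.har");
+          FileSystem harFs = archive.getFileSystem(new Configuration());
+          // listStatus consults the sorted _index via the _masterindex ranges.
+          for (FileStatus s : harFs.listStatus(archive)) {
+            System.out.println(s.getPath());
+          }
+        }
+      }
+ -->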
+ <!-- start class org.apache.hadoop.fs.InMemoryFileSystem -->
+ <class name="InMemoryFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InMemoryFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InMemoryFileSystem" type="java.net.URI, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reserveSpaceWithCheckSum" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Register a file with its size. This will also register a checksum for the
+ file that the user is trying to create. This is required since none of
+ the FileSystem APIs accept the size of the file as an argument. But since
+ we must know the size of the file a priori before creating it,
+ the user must call this method for each file they want to create,
+ reserving memory for that file. We either succeed in reserving memory
+ for both the main file and the checksum file and return true, or return
+ false.]]>
+ </doc>
+ </method>
+ <method name="getFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getNumFiles" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getFSSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPercentUsed" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of the in-memory filesystem. This implementation assumes
+ that the file lengths are known ahead of time and that the total length of all
+ the files is below a certain number (like 100 MB, configurable). Use the API
+ reserveSpaceWithCheckSum(Path f, long size) (see the method description above)
+ for reserving space in the FS. The URI of this filesystem starts with
+ ramfs:// .]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.InMemoryFileSystem -->
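+ <!-- A sketch of the reserve-then-create protocol described above; the
+      ramfs://test URI and the 1 KB size are illustrative assumptions.
+
+      import java.net.URI;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.InMemoryFileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class RamFsExample {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          InMemoryFileSystem ramFs =
+              new InMemoryFileSystem(URI.create("ramfs://test"), conf);
+          Path f = new Path("ramfs://test/tmp/block-0");
+          // Reserve memory for the file and its checksum up front;
+          // create() takes no size argument, so this must happen first.
+          if (ramFs.reserveSpaceWithCheckSum(f, 1024L)) {
+            ramFs.create(f, true, 4096).close();
+          }
+        }
+      }
+ -->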
+ <!-- start class org.apache.hadoop.fs.LocalDirAllocator -->
+ <class name="LocalDirAllocator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalDirAllocator" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an allocator object.
+ @param contextCfgItemName the configuration item that lists the candidate local directories]]>
+ </doc>
+ </constructor>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. This method should be used if the size of
+ the file is not known a priori. We go round-robin over the set of disks
+ (via the configured dirs) and return the first complete path where
+ we could create the parent directory of the passed path.
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. Pass size as -1 if not known a priori. We
+ round-robin over the set of disks (via the configured dirs) and return
+ the first complete path which has enough space.
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathToRead" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS for reading. We search through all the
+ configured dirs for the file's existence and return the complete
+ path to the file when we find one.
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createTmpFileForWrite" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a temporary file in the local FS. Pass size as -1 if not known
+ a priori. We round-robin over the set of disks (via the configured dirs)
+ and select the first complete path which has enough space. A file is
+ created on this directory. The file is guaranteed to go away when the
+ JVM exits.
+ @param pathStr prefix for the temporary file
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return a unique temporary file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isContextValid" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextCfgItemName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Method to check whether a context is valid.
+ @param contextCfgItemName
+ @return true if the context is valid, false otherwise]]>
+ </doc>
+ </method>
+ <method name="ifExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[We search through all the configured dirs for the file's existence
+ and return true when we find it.
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return true if the file exists, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of a round-robin scheme for disk allocation when
+ creating files. It keeps track of which disk was last allocated
+ for a file write. For the current request, the next disk from
+ the set of disks is allocated if the free space on that disk is
+ sufficient to accommodate the file that is being considered for
+ creation. If the space requirements cannot be met, the next disk in order
+ is tried, and so on until a disk with sufficient capacity is found.
+ Once a disk with sufficient space is identified, a check is done to make
+ sure that the disk is writable. Also, there is an API provided that doesn't
+ take the space requirements into consideration but just checks whether the
+ disk under consideration is writable (this should be used for cases where
+ the file size is not known a priori). An API is provided to read a path that
+ was created earlier. That API works by scanning all the disks for the
+ input pathname.
+ This implementation also provides the functionality of having multiple
+ allocators per JVM (one for each unique functionality or context, like
+ mapred, dfs-client, etc.). It ensures that there is only one instance of
+ an allocator per context per JVM.
+ Note:
+ 1. The contexts referred to above are actually the configuration items defined
+ in the Configuration class, like "mapred.local.dir" (for which we want to
+ control the dir allocations). The context strings are exactly those
+ configuration items.
+ 2. This implementation does not take into consideration cases where
+ a disk becomes read-only or runs out of space while a file is being written
+ to (disks are shared between multiple processes, so the latter situation
+ is probable).
+ 3. In the class implementation, "Disk" is referred to as "Dir", which
+ actually points to the configured directory on the Disk which will be the
+ parent for all file write/read allocations.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalDirAllocator -->
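+ <!-- A short sketch of allocating a scratch file through the round-robin
+      scheme above; "mapred.local.dir" is one of the context strings from
+      note 1, and the relative path is made up.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.LocalDirAllocator;
+      import org.apache.hadoop.fs.Path;
+
+      public class ScratchFile {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          // The context string names the config item listing candidate dirs.
+          LocalDirAllocator alloc = new LocalDirAllocator("mapred.local.dir");
+          // Size -1 means "unknown a priori": only writability is checked.
+          Path scratch = alloc.getLocalPathForWrite("work/spill-0.out", -1, conf);
+          System.out.println(scratch);
+        }
+      }
+ -->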
+ <!-- start class org.apache.hadoop.fs.LocalFileSystem -->
+ <class name="LocalFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocalFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Moves files to a bad file directory on the same device, so that their
+ storage will not be reused.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the checksummed local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalFileSystem -->
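+ <!-- A minimal sketch of obtaining the checksummed local filesystem and
+      mapping a Path back to a java.io.File; /tmp/example.txt is a
+      placeholder path.
+
+      import java.io.File;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.LocalFileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class LocalFsExample {
+        public static void main(String[] args) throws Exception {
+          // getLocal() returns the checksummed local filesystem.
+          LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
+          File f = localFs.pathToFile(new Path("/tmp/example.txt"));
+          System.out.println(f.getAbsolutePath());
+        }
+      }
+ -->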
+ <!-- start class org.apache.hadoop.fs.Path -->
+ <class name="Path" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="Path" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a path from a String. Path strings are URIs, but with
+ unescaped elements and some additional normalization.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Path from components.]]>
+ </doc>
+ </constructor>
+ <method name="toUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this to a URI.]]>
+ </doc>
+ </method>
+ <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the FileSystem that owns this Path.]]>
+ </doc>
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True if the directory of this path is absolute.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the final component of this path.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the parent of a path or null if at root.]]>
+ </doc>
+ </method>
+ <method name="suffix" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a suffix to the final name in the path.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="depth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of elements in this path.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <doc>
+ <![CDATA[Returns a qualified path object.]]>
+ </doc>
+ </method>
+ <field name="SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The directory separator, a slash.]]>
+ </doc>
+ </field>
+ <field name="SEPARATOR_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CUR_DIR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Names a file or directory in a {@link FileSystem}.
+ Path strings use slash as the directory separator. A path string is
+ absolute if it begins with a slash.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Path -->
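+ <!-- A small sketch of the resolution and inspection methods above; the
+      hdfs://nn:9000 authority and file names are placeholders.
+
+      import org.apache.hadoop.fs.Path;
+
+      public class PathExample {
+        public static void main(String[] args) {
+          Path parent = new Path("hdfs://nn:9000/user/foo");
+          Path child = new Path(parent, "logs/app.log"); // child vs. parent
+          System.out.println(child.getName());    // app.log
+          System.out.println(child.getParent());  // .../user/foo/logs
+          System.out.println(child.depth());      // 4 path elements
+          System.out.println(child.isAbsolute()); // true: begins with a slash
+        }
+      }
+ -->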
+ <!-- start interface org.apache.hadoop.fs.PathFilter -->
+ <interface name="PathFilter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Tests whether or not the specified abstract pathname should be
+ included in a pathname list.
+
+ @param path The abstract pathname to be tested
+ @return <code>true</code> if and only if <code>path</code>
+ should be included]]>
+ </doc>
+ </method>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PathFilter -->
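+ <!-- A hedged sketch of a filter passed to FileSystem.listStatus(Path,
+      PathFilter); the directory and the ".log" suffix are made up.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileStatus;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.fs.PathFilter;
+
+      public class ListLogs {
+        public static void main(String[] args) throws Exception {
+          FileSystem fs = FileSystem.get(new Configuration());
+          PathFilter logsOnly = new PathFilter() {
+            public boolean accept(Path path) {
+              return path.getName().endsWith(".log");
+            }
+          };
+          // listStatus(Path, PathFilter) applies accept() to each child.
+          for (FileStatus s : fs.listStatus(new Path("/var/app"), logsOnly)) {
+            System.out.println(s.getPath());
+          }
+        }
+      }
+ -->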
+ <!-- start interface org.apache.hadoop.fs.PositionedReadable -->
+ <interface name="PositionedReadable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read up to the specified number of bytes, from a given
+ position within a file, and return the number of bytes read. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the specified number of bytes, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a number of bytes equal to the length of the buffer, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits positional reading.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PositionedReadable -->
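+ <!-- A sketch of positional reads on FSDataInputStream, which implements
+      this interface; the path and offsets are illustrative.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FSDataInputStream;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class PreadExample {
+        public static void main(String[] args) throws Exception {
+          FileSystem fs = FileSystem.get(new Configuration());
+          FSDataInputStream in = fs.open(new Path("/data/records.bin"));
+          byte[] header = new byte[16];
+          // Positional reads never move the stream's own offset, so several
+          // threads may safely share one open stream.
+          in.readFully(0L, header);  // exactly 16 bytes at offset 0
+          int n = in.read(1024L, header, 0, header.length); // best effort
+          in.close();
+        }
+      }
+ -->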
+ <!-- start class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <class name="RawLocalFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RawLocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the specified directory hierarchy. Does not
+ treat existence as an error.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsWorkingFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chown to set owner.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chmod to set permission.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the raw local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <!-- start interface org.apache.hadoop.fs.Seekable -->
+ <interface name="Seekable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file.]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks a different copy of the data. Returns true if
+ a new source is found, false otherwise.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits seeking.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Seekable -->
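+ <!-- A matching sketch for Seekable, again via FSDataInputStream; the path
+      and offset are illustrative.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FSDataInputStream;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class SeekExample {
+        public static void main(String[] args) throws Exception {
+          FileSystem fs = FileSystem.get(new Configuration());
+          FSDataInputStream in = fs.open(new Path("/data/records.bin"));
+          in.seek(128L);                    // next read() starts at offset 128
+          System.out.println(in.getPos()); // 128
+          in.close();
+        }
+      }
+ -->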
+ <!-- start class org.apache.hadoop.fs.ShellCommand -->
+ <class name="ShellCommand" extends="org.apache.hadoop.util.Shell"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link Shell} instead.">
+ <constructor name="ShellCommand"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A base class for running a Unix command like du or df.
+ @deprecated Use {@link Shell} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ShellCommand -->
+ <!-- start interface org.apache.hadoop.fs.Syncable -->
+ <interface name="Syncable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Synchronize all buffers with the underlying devices.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface declares the sync() operation.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Syncable -->
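+ <!-- A sketch of sync() via FSDataOutputStream, assuming the wrapped stream
+      supports it; the path and payload are made up.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FSDataOutputStream;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class SyncExample {
+        public static void main(String[] args) throws Exception {
+          FileSystem fs = FileSystem.get(new Configuration());
+          FSDataOutputStream out = fs.create(new Path("/data/journal.bin"));
+          out.write(new byte[]{1, 2, 3});
+          out.sync();  // push buffered bytes through to the underlying devices
+          out.close();
+        }
+      }
+ -->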
+ <!-- start class org.apache.hadoop.fs.Trash -->
+ <class name="Trash" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Trash" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a trash can accessor.
+ @param conf a Configuration]]>
+ </doc>
+ </constructor>
+ <method name="moveToTrash" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move a file or directory to the current trash directory.
+ @return false if the item is already in the trash or trash is disabled]]>
+ </doc>
+ </method>
+ <method name="checkpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a trash checkpoint.]]>
+ </doc>
+ </method>
+ <method name="expunge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete old checkpoints.]]>
+ </doc>
+ </method>
+ <method name="getEmptier" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a {@link Runnable} that periodically empties the trash of all
+ users, intended to be run by the superuser. Only one checkpoint is kept
+ at a time.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Run an emptier.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides a <i>trash</i> feature. Files are moved to a user's trash
+ directory, a subdirectory of their home directory named ".Trash". Files are
+ initially moved to a <i>current</i> sub-directory of the trash directory.
+ Within that sub-directory their original path is preserved. Periodically
+ one may checkpoint the current trash and remove older checkpoints. (This
+ design permits trash management without enumeration of the full trash
+ content, without date support in the filesystem, and without clock
+ synchronization.)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Trash -->
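+ <!-- A sketch of the trash life cycle described above; the doomed path is
+      hypothetical.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.fs.Trash;
+
+      public class TrashExample {
+        public static void main(String[] args) throws Exception {
+          Trash trash = new Trash(new Configuration());
+          // false when trash is disabled or the item is already in the trash
+          if (!trash.moveToTrash(new Path("/user/foo/old-data"))) {
+            System.err.println("not trashed");
+          }
+          trash.checkpoint(); // roll "current" into a dated checkpoint
+          trash.expunge();    // delete checkpoints older than the interval
+        }
+      }
+ -->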
+</package>
+<package name="org.apache.hadoop.fs.ftp">
+ <!-- start class org.apache.hadoop.fs.ftp.FTPException -->
+ <class name="FTPException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.String, java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A class to wrap a {@link Throwable} into a RuntimeException.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPException -->
+ <!-- start class org.apache.hadoop.fs.ftp.FTPFileSystem -->
+ <class name="FTPFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A stream obtained via this call must be closed before using other APIs of
+ this class or else the invocation will block.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} backed by an FTP client provided by <a
+ href="http://commons.apache.org/net/">Apache Commons Net</a>.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPFileSystem -->
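+ <!-- A hedged sketch of reading over FTP; host and credentials are
+      placeholders, and the single-stream caveat comes from the create()
+      note above (assumed here to apply to open() as well).
+
+      import java.net.URI;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FSDataInputStream;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class FtpExample {
+        public static void main(String[] args) throws Exception {
+          FileSystem ftpFs = FileSystem.get(
+              URI.create("ftp://user:secret@ftp.example.com/"), new Configuration());
+          FSDataInputStream in = ftpFs.open(new Path("/pub/data.csv"));
+          // Close this stream before calling other methods on ftpFs,
+          // or the next invocation may block.
+          in.close();
+        }
+      }
+ -->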
+ <!-- start class org.apache.hadoop.fs.ftp.FTPInputStream -->
+ <class name="FTPInputStream" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPInputStream" type="java.io.InputStream, org.apache.commons.net.ftp.FTPClient, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readLimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPInputStream -->
+</package>
+<package name="org.apache.hadoop.fs.kfs">
+ <!-- start class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+ <class name="KosmosFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="KosmosFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return null if the file doesn't exist; otherwise, get the
+      locations of the various chunks of the file from KFS.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A FileSystem backed by KFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+</package>
+<package name="org.apache.hadoop.fs.permission">
+ <!-- start class org.apache.hadoop.fs.permission.AccessControlException -->
+ <class name="AccessControlException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AccessControlException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor is needed for unwrapping from
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[An exception class for access control related issues.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.AccessControlException -->
+ <!-- start class org.apache.hadoop.fs.permission.FsAction -->
+ <class name="FsAction" extends="java.lang.Enum&lt;org.apache.hadoop.fs.permission.FsAction&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.fs.permission.FsAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="implies" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[Return true if this action implies that action.
+      @param that the action to compare against]]>
+ </doc>
+ </method>
+ <method name="and" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[AND operation.]]>
+ </doc>
+ </method>
+ <method name="or" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[OR operation.]]>
+ </doc>
+ </method>
+ <method name="not" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[NOT operation.]]>
+ </doc>
+ </method>
+ <field name="INDEX" type="int"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Octal representation]]>
+ </doc>
+ </field>
+ <field name="SYMBOL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Symbolic representation]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[File system actions, e.g. read, write, etc.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsAction -->
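+  <!-- A short sketch of the action algebra listed above; READ, WRITE,
+       READ_WRITE and WRITE_EXECUTE are the usual FsAction enum constants:
+
+       import org.apache.hadoop.fs.permission.FsAction;
+
+       boolean ok = FsAction.READ_WRITE.implies(FsAction.READ);  // true
+       FsAction rw = FsAction.READ.or(FsAction.WRITE);           // READ_WRITE
+       FsAction r  = rw.and(FsAction.READ);                      // READ
+       FsAction wx = FsAction.READ.not();                        // WRITE_EXECUTE
+  -->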
+ <!-- start class org.apache.hadoop.fs.permission.FsPermission -->
+ <class name="FsPermission" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct by the given {@link FsAction}.
+ @param u user action
+ @param g group action
+ @param o other action]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="short"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct by the given mode.
+ @param mode
+ @see #toShort()]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor
+
+ @param other other permission]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="permission" type="short"/>
+ <doc>
+ <![CDATA[Create an immutable {@link FsPermission} object.]]>
+ </doc>
+ </method>
+ <method name="getUserAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getGroupAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getOtherAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return other {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="fromShort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="short"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link FsPermission} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="toShort" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Encode the object to a short.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply a umask to this permission and return a new one]]>
+ </doc>
+ </method>
+ <method name="getUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="setUMask"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Set the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="getDefault" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default permission.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unixSymbolicPermission" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Create an FsPermission from a Unix symbolic permission string
+ @param unixSymbolicPermission e.g. "-rw-rw-rw-"]]>
+ </doc>
+ </method>
+ <field name="UMASK_LABEL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[umask property label]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_UMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A class for file/directory permissions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsPermission -->
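+  <!-- A sketch of building and converting permissions with the members
+       listed above; the concrete modes are illustrative:
+
+       import org.apache.hadoop.fs.permission.FsAction;
+       import org.apache.hadoop.fs.permission.FsPermission;
+
+       // rwxr-xr-x assembled from per-class actions
+       FsPermission p = new FsPermission(
+           FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
+       short mode = p.toShort();                            // 0755
+       FsPermission q = FsPermission.valueOf("-rwxr-xr-x"); // the same permission
+       FsPermission masked = p.applyUMask(new FsPermission((short) 022));
+  -->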
+ <!-- start class org.apache.hadoop.fs.permission.PermissionStatus -->
+ <class name="PermissionStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="PermissionStatus" type="java.lang.String, java.lang.String, org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <param name="group" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Create an immutable {@link PermissionStatus} object.]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user name]]>
+ </doc>
+ </method>
+ <method name="getGroupName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group name]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return permission]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply umask.
+ @see FsPermission#applyUMask(FsPermission)]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link PermissionStatus} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a {@link PermissionStatus} from its base components.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store permission related information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.PermissionStatus -->
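+  <!-- A round-trip sketch for PermissionStatus; the user and group names
+       are placeholders, and DataOutputBuffer / DataInputBuffer from
+       org.apache.hadoop.io are assumed available as scratch buffers:
+
+       import org.apache.hadoop.fs.permission.FsPermission;
+       import org.apache.hadoop.fs.permission.PermissionStatus;
+       import org.apache.hadoop.io.DataInputBuffer;
+       import org.apache.hadoop.io.DataOutputBuffer;
+
+       PermissionStatus ps = PermissionStatus.createImmutable(
+           "alice", "users", FsPermission.getDefault());
+       DataOutputBuffer out = new DataOutputBuffer();
+       ps.write(out);                             // Writable serialization
+       DataInputBuffer in = new DataInputBuffer();
+       in.reset(out.getData(), out.getLength());
+       PermissionStatus back = PermissionStatus.read(in);
+  -->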
+</package>
+<package name="org.apache.hadoop.fs.s3">
+ <!-- start class org.apache.hadoop.fs.s3.Block -->
+ <class name="Block" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Block" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Holds metadata about a block of data being stored in a {@link FileSystemStore}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.Block -->
+ <!-- start interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <interface name="FileSystemStore" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inode" type="org.apache.hadoop.fs.s3.INode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="inodeExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveINode" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveBlock" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="byteRangeStart" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listDeepSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="purge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete everything. Used for testing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="dump"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Diagnostic method to dump all INodes to the console.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for storing and retrieving {@link INode}s and {@link Block}s.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.s3.FileSystemStore -->
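+  <!-- A sketch of the store contract above, written against the interface
+       only; any concrete FileSystemStore implementation can stand in, and
+       the URI is a placeholder:
+
+       import java.io.IOException;
+       import java.net.URI;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.fs.s3.FileSystemStore;
+       import org.apache.hadoop.fs.s3.INode;
+
+       void roundTrip(FileSystemStore store, Configuration conf,
+                      Path path, INode inode) throws IOException {
+         store.initialize(URI.create("s3://bucket"), conf);
+         store.storeINode(path, inode);             // persist file metadata
+         INode meta = store.retrieveINode(path);    // read it back
+         if (store.inodeExists(path)) {
+           store.deleteINode(path);
+         }
+       }
+  -->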
+ <!-- start class org.apache.hadoop.fs.s3.INode -->
+ <class name="INode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="INode" type="org.apache.hadoop.fs.s3.INode.FileType, org.apache.hadoop.fs.s3.Block[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.fs.s3.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileType" return="org.apache.hadoop.fs.s3.INode.FileType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSerializedLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="serialize" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deserialize" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="FILE_TYPES" type="org.apache.hadoop.fs.s3.INode.FileType[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DIRECTORY_INODE" type="org.apache.hadoop.fs.s3.INode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[Holds file metadata including type (regular file or directory),
+ and the list of blocks that are pointers to the data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.INode -->
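+  <!-- A serialize/deserialize sketch for INode; the block id and length
+       are illustrative, and FileType.FILE is assumed to be one of the
+       nested enum's constants:
+
+       import java.io.InputStream;
+       import org.apache.hadoop.fs.s3.Block;
+       import org.apache.hadoop.fs.s3.INode;
+
+       INode inode = new INode(INode.FileType.FILE,
+                               new Block[] { new Block(1L, 4096L) });
+       InputStream s = inode.serialize();        // getSerializedLength() bytes
+       INode back = INode.deserialize(s);
+  -->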
+ <!-- start class org.apache.hadoop.fs.s3.MigrationTool -->
+ <class name="MigrationTool" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="MigrationTool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ This class is a tool for migrating data from an older to a newer version
+ of an S3 filesystem.
+ </p>
+ <p>
+      All files in the filesystem are migrated by re-writing the block
+      metadata; no data files are touched.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.MigrationTool -->
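+  <!-- Since MigrationTool implements Tool, it is normally driven through
+       ToolRunner; the bucket URI is a placeholder:
+
+       import org.apache.hadoop.util.ToolRunner;
+
+       int rc = ToolRunner.run(new MigrationTool(),
+                               new String[] { "s3://bucket" });
+  -->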
+ <!-- start class org.apache.hadoop.fs.s3.S3Credentials -->
+ <class name="S3Credentials" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Credentials"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@throws IllegalArgumentException if credentials for S3 cannot be
+ determined.]]>
+ </doc>
+ </method>
+ <method name="getAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSecretAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Extracts AWS credentials from the filesystem URI or configuration.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Credentials -->
+ <!-- start class org.apache.hadoop.fs.s3.S3Exception -->
+ <class name="S3Exception" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Exception" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown if there is a problem communicating with Amazon S3.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Exception -->
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystem -->
+ <class name="S3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="S3FileSystem" type="org.apache.hadoop.fs.s3.FileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[FileStatus for S3 file systems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A block-based {@link FileSystem} backed by
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ </p>
+ @see NativeS3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystem -->
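+  <!-- A minimal sketch of reaching the block-based store through the
+       generic FileSystem factory; bucket and paths are placeholders:
+
+       import java.net.URI;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FSDataOutputStream;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       Configuration conf = new Configuration();
+       FileSystem s3 = FileSystem.get(URI.create("s3://bucket/"), conf);
+       s3.mkdirs(new Path("/data"));
+       FSDataOutputStream out = s3.create(new Path("/data/part-0"));
+       out.close();
+  -->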
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <class name="S3FileSystemException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystemException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when there is a fatal exception while using {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <!-- start class org.apache.hadoop.fs.s3.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="org.apache.hadoop.fs.s3.S3FileSystemException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when Hadoop cannot read the version of the data stored
+ in {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.VersionMismatchException -->
+</package>
+<package name="org.apache.hadoop.fs.s3native">
+ <!-- start class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
+ <class name="NativeS3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeS3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NativeS3FileSystem" type="org.apache.hadoop.fs.s3native.NativeFileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ If <code>f</code> is a file, this method will make a single call to S3.
+ If <code>f</code> is a directory, this method will make a maximum of
+ (<i>n</i> / 1000) + 2 calls to S3, where <i>n</i> is the total number of
+ files and directories contained directly in <code>f</code>.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} for reading and writing files stored on
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+      Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem}, this implementation
+      stores files on S3 in their native form so they can be read by other
+      S3 tools.
+ </p>
+ @see org.apache.hadoop.fs.s3.S3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
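+  <!-- A sketch contrasting the native store with the block-based one; note
+       the s3n scheme. The listStatus bound above works out to, e.g., at
+       most (2500 / 1000) + 2 = 4 S3 calls for a directory of 2500 entries.
+       Bucket and path are placeholders:
+
+       import java.net.URI;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileStatus;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       Configuration conf = new Configuration();
+       FileSystem s3n = FileSystem.get(URI.create("s3n://bucket/"), conf);
+       FileStatus[] entries = s3n.listStatus(new Path("/logs"));
+  -->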
+</package>
+<package name="org.apache.hadoop.fs.shell">
+ <!-- start class org.apache.hadoop.fs.shell.Command -->
+ <class name="Command" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Command" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return the command's name, excluding the leading '-' character]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the command on the input path
+
+ @param path the input path
+ @throws IOException if any error occurs]]>
+ </doc>
+ </method>
+ <method name="runAll" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[For each source path, execute the command
+
+ @return 0 if it runs successfully; -1 if it fails]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="args" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract class for the execution of a file system command]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Command -->
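+  <!-- A sketch of the subclass contract; "Touchz" is a hypothetical command
+       invented here for illustration, not part of the listed API:
+
+       import java.io.IOException;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.fs.shell.Command;
+
+       class Touchz extends Command {
+         protected Touchz(FileSystem fs) { super(fs); }
+         public String getCommandName() { return "touchz"; }
+         protected void run(Path path) throws IOException {
+           fs.create(path).close();  // "fs" is the protected field listed above
+         }
+       }
+  -->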
+ <!-- start class org.apache.hadoop.fs.shell.CommandFormat -->
+ <class name="CommandFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CommandFormat" type="java.lang.String, int, int, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="parse" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="pos" type="int"/>
+ <doc>
+ <![CDATA[Parse parameters starting from the given position
+
+ @param args an array of input arguments
+      @param pos the position at which to start parsing
+ @return a list of parameters]]>
+ </doc>
+ </method>
+ <method name="getOpt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="option" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Return whether the option is set
+
+ @param option String representation of an option
+      @return true if the option is set; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Parse the args of a command and check the format of args.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.CommandFormat -->
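+  <!-- A parsing sketch; the "q" option mirrors the quota flag of the count
+       command that follows, and the min/max argument counts are
+       illustrative:
+
+       import java.util.List;
+       import org.apache.hadoop.fs.shell.CommandFormat;
+
+       String[] args = { "-count", "-q", "/user/alice" };
+       CommandFormat fmt = new CommandFormat("count", 1, Integer.MAX_VALUE, "q");
+       List<String> paths = fmt.parse(args, 1); // position 1 skips "-count" itself
+       boolean quota = fmt.getOpt("q");         // true
+  -->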
+ <!-- start class org.apache.hadoop.fs.shell.Count -->
+ <class name="Count" extends="org.apache.hadoop.fs.shell.Command"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Count" type="java.lang.String[], int, org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param cmd the count command
+ @param pos the starting index of the arguments
+ @param fs the file system handler]]>
+ </doc>
+ </constructor>
+ <method name="matches" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Check if a command is the count command
+
+ @param cmd A string representation of a command starting with "-"
+      @return true if this is the count command; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USAGE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DESCRIPTION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Count the number of directories, files, bytes, quota, and remaining quota.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Count -->
+</package>
+<package name="org.apache.hadoop.io">
+ <!-- start class org.apache.hadoop.io.AbstractMapWritable -->
+ <class name="AbstractMapWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="AbstractMapWritable"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="addToMap"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a Class to the maps if it is not already present.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="byte"/>
+ <doc>
+ <![CDATA[@return the Class object for the specified id]]>
+ </doc>
+ </method>
+ <method name="getId" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[@return the id for the specified Class]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Used by child copy constructors.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the conf]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@param conf the conf to set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract base class for MapWritable and SortedMapWritable
+
+ Unlike org.apache.nutch.crawl.MapWritable, this class allows creation of
+ MapWritable&lt;Writable, MapWritable&gt; so the CLASS_TO_ID and ID_TO_CLASS
+ maps travel with the class instead of being static.
+
+ Class ids range from 1 to 127 so there can be at most 127 distinct classes
+ in any specific map instance.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.AbstractMapWritable -->
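+ <!-- A minimal sketch of the per-instance class registry described above,
+      using the concrete MapWritable subclass mentioned in the text; the
+      keys and values are illustrative.
+
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.MapWritable;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.io.Writable;
+
+ public class MapWritableExample {
+   public static void main(String[] argv) {
+     MapWritable map = new MapWritable();
+     // Each distinct Writable class used below gets a one-byte id (1..127)
+     // that is serialized with this instance rather than held statically.
+     map.put(new Text("count"), new IntWritable(42));
+     map.put(new Text("name"), new Text("spark"));
+     Writable v = map.get(new Text("count"));
+     System.out.println(v); // 42
+   }
+ }
+ -->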
+ <!-- start class org.apache.hadoop.io.ArrayFile -->
+ <class name="ArrayFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A dense file-based mapping from integers to values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Reader -->
+ <class name="ArrayFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an array reader for the named file.]]>
+ </doc>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader before its <code>n</code>th value.]]>
+ </doc>
+ </method>
+ <method name="next" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read and return the next value in the file.]]>
+ </doc>
+ </method>
+ <method name="key" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the key associated with the most recent call to {@link
+ #seek(long)}, {@link #next(Writable)}, or {@link
+ #get(long,Writable)}.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the <code>n</code>th value in the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Reader -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Writer -->
+ <class name="ArrayFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a value to the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Writer -->
+ <!-- start class org.apache.hadoop.io.ArrayWritable -->
+ <class name="ArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for arrays containing instances of a class. The elements of this
+ writable must all be instances of the same class. If this writable will be
+ the input for a Reducer, you will need to create a subclass that sets the
+ value to be of the proper type.
+
+ For example:
+ <code>
+ public class IntArrayWritable extends ArrayWritable {
+ public IntArrayWritable() {
+ super(IntWritable.class);
+ }
+ }
+ </code>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayWritable -->
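+ <!-- A short round-trip sketch for ArrayWritable using plain java.io
+      streams; values and class names are illustrative.
+
+ import java.io.ByteArrayInputStream;
+ import java.io.ByteArrayOutputStream;
+ import java.io.DataInputStream;
+ import java.io.DataOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.io.ArrayWritable;
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.Writable;
+
+ public class ArrayWritableExample {
+   public static void main(String[] argv) throws IOException {
+     ArrayWritable aw = new ArrayWritable(IntWritable.class,
+         new Writable[] { new IntWritable(1), new IntWritable(2) });
+     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+     aw.write(new DataOutputStream(bytes));   // serialize
+
+     ArrayWritable copy = new ArrayWritable(IntWritable.class);
+     copy.readFields(new DataInputStream(
+         new ByteArrayInputStream(bytes.toByteArray()))); // deserialize
+     for (Writable w : copy.get()) {
+       System.out.println(w);                 // 1, then 2
+     }
+   }
+ }
+ -->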
+ <!-- start class org.apache.hadoop.io.BooleanWritable -->
+ <class name="BooleanWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BooleanWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BooleanWritable" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="get" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for booleans.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable -->
+ <!-- start class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <class name="BooleanWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BooleanWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BooleanWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.BytesWritable -->
+ <class name="BytesWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-size sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="BytesWritable" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a BytesWritable using the byte array as the initial value.
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the BytesWritable.
+ @return the backing data; only bytes 0 through getSize() - 1 are valid.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current size of the buffer.]]>
+ </doc>
+ </method>
+ <method name="setSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Change the size of the buffer. The values in the old range are preserved
+ and any new values are undefined. The capacity is changed if necessary.
+ @param size The new number of bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum size that can be handled
+ without resizing the backing storage.
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_cap" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved.
+ @param new_cap The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="org.apache.hadoop.io.BytesWritable"/>
+ <doc>
+ <![CDATA[Set the BytesWritable to the contents of the given newData.
+ @param newData the value to set this BytesWritable to.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Set the value to a copy of the given byte range
+ @param newData the new values to copy in
+ @param offset the offset in newData to start at
+ @param length the number of bytes to copy]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Define the sort order of the BytesWritable.
+ @param right_obj The other bytes writable
+ @return Positive if left is bigger than right, 0 if they are equal, and
+ negative if left is smaller than right.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Are the two byte sequences equal?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generate the stream of bytes as hex pairs separated by ' '.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is usable as a key or value.
+ It is resizable and distinguishes between the size of the sequence and
+ the current capacity. The hash function is the front of the MD5 of the
+ buffer. The sort order is the same as memcmp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable -->
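+ <!-- A small sketch of the size/capacity distinction described above; the
+      example bytes are arbitrary.
+
+ import org.apache.hadoop.io.BytesWritable;
+
+ public class BytesWritableExample {
+   public static void main(String[] argv) {
+     byte[] data = { 10, 20, 30, 40 };
+     BytesWritable bw = new BytesWritable();
+     bw.set(data, 1, 2);                // copies bytes 20 and 30
+     System.out.println(bw.getSize());  // 2
+     byte[] backing = bw.get();
+     for (int i = 0; i < bw.getSize(); i++) {
+       System.out.println(backing[i]);  // 20, then 30
+     }
+     // backing.length equals getCapacity(), which may exceed getSize()
+   }
+ }
+ -->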
+ <!-- start class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <class name="BytesWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BytesWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BytesWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ByteWritable -->
+ <class name="ByteWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="ByteWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ByteWritable" type="byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Set the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a ByteWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two ByteWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for a single byte.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable -->
+ <!-- start class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <class name="ByteWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ByteWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for ByteWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <!-- start interface org.apache.hadoop.io.Closeable -->
+ <interface name="Closeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="use java.io.Closeable">
+ <implements name="java.io.Closeable"/>
+ <doc>
+ <![CDATA[@deprecated use java.io.Closeable]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Closeable -->
+ <!-- start class org.apache.hadoop.io.CompressedWritable -->
+ <class name="CompressedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="CompressedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ensureInflated"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Must be called by all methods which access fields to ensure that the data
+ has been uncompressed.]]>
+ </doc>
+ </method>
+ <method name="readFieldsCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #readFields(DataInput)}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #write(DataOutput)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base-class for Writables which store themselves compressed and lazily
+ inflate on field access. This is useful for large objects whose fields are
+ not altered during a map or reduce operation: leaving the field data
+ compressed makes copying the instance from one file to another much
+ faster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.CompressedWritable -->
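+ <!-- A sketch of the subclassing contract described above: implement
+      readFieldsCompressed/writeCompressed instead of readFields/write and
+      call ensureInflated() in every accessor. The field name is illustrative.
+
+ import java.io.DataInput;
+ import java.io.DataOutput;
+ import java.io.IOException;
+ import org.apache.hadoop.io.CompressedWritable;
+
+ public class BigRecord extends CompressedWritable {
+   private String payload = "";
+
+   public String getPayload() {
+     ensureInflated();            // lazily decompress before field access
+     return payload;
+   }
+
+   protected void readFieldsCompressed(DataInput in) throws IOException {
+     payload = in.readUTF();      // reads the already-inflated bytes
+   }
+
+   protected void writeCompressed(DataOutput out) throws IOException {
+     out.writeUTF(payload);       // output is compressed by the base class
+   }
+ }
+ -->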
+ <!-- start class org.apache.hadoop.io.DataInputBuffer -->
+ <class name="DataInputBuffer" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataInputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataInput} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataInputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataInputBuffer buffer = new DataInputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using DataInput methods ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataInputBuffer -->
+ <!-- start class org.apache.hadoop.io.DataOutputBuffer -->
+ <class name="DataOutputBuffer" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataOutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <constructor name="DataOutputBuffer" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from a DataInput directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataOutput} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataOutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataOutputBuffer buffer = new DataOutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using DataOutput methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataOutputBuffer -->
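+ <!-- A concrete version of the two usage patterns sketched above: fill a
+      DataOutputBuffer, then read the same bytes back with a DataInputBuffer.
+      The values written are arbitrary.
+
+ import java.io.IOException;
+ import org.apache.hadoop.io.DataInputBuffer;
+ import org.apache.hadoop.io.DataOutputBuffer;
+
+ public class BufferRoundTrip {
+   public static void main(String[] argv) throws IOException {
+     DataOutputBuffer out = new DataOutputBuffer();
+     out.reset();                  // reuse the same buffer across iterations
+     out.writeInt(7);              // DataOutputStream methods are inherited
+     out.writeUTF("hadoop");
+
+     DataInputBuffer in = new DataInputBuffer();
+     in.reset(out.getData(), out.getLength()); // only the valid prefix
+     System.out.println(in.readInt());         // 7
+     System.out.println(in.readUTF());         // hadoop
+   }
+ }
+ -->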
+ <!-- start class org.apache.hadoop.io.DefaultStringifier -->
+ <class name="DefaultStringifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Stringifier&lt;T&gt;"/>
+ <constructor name="DefaultStringifier" type="org.apache.hadoop.conf.Configuration, java.lang.Class&lt;T&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="store"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="item" type="K"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the item in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to store the item in
+ @param item the object to be stored
+ @param keyName the name of the key to use
+ @throws IOException forwards exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="load" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return the restored object
+ @throws IOException forwards exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="storeArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="items" type="K[]"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the array of items in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param items the objects to be stored
+ @param keyName the name of the key to use
+ @throws IndexOutOfBoundsException if the items array is empty
+ @throws IOException forwards exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="loadArray" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the array of objects from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return the restored array of objects
+ @throws IOException forwards exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[DefaultStringifier is the default implementation of the {@link Stringifier}
+ interface which stringifies the objects using base64 encoding of the
+ serialized version of the objects. The {@link Serializer} and
+ {@link Deserializer} are obtained from the {@link SerializationFactory}.
+ <br>
+ DefaultStringifier offers convenience methods to store/load objects to/from
+ the configuration.
+
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DefaultStringifier -->
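+ <!-- A sketch of the store/load convenience methods described above; it
+      assumes the default configuration provides a serializer for the item
+      class (Writable types such as IntWritable are covered by default).
+
+ import java.io.IOException;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.io.DefaultStringifier;
+ import org.apache.hadoop.io.IntWritable;
+
+ public class StringifierExample {
+   public static void main(String[] argv) throws IOException {
+     Configuration conf = new Configuration();
+     // base64-encode the serialized item and place it under "my.key"
+     DefaultStringifier.store(conf, new IntWritable(123), "my.key");
+     IntWritable restored =
+         DefaultStringifier.load(conf, "my.key", IntWritable.class);
+     System.out.println(restored.get()); // 123
+   }
+ }
+ -->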
+ <!-- start class org.apache.hadoop.io.DoubleWritable -->
+ <class name="DoubleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="DoubleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DoubleWritable" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="double"/>
+ </method>
+ <method name="get" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a DoubleWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Writable for Double values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable -->
+ <!-- start class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <class name="DoubleWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DoubleWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for DoubleWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.FloatWritable -->
+ <class name="FloatWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="FloatWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FloatWritable" type="float"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="float"/>
+ <doc>
+ <![CDATA[Set the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a FloatWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two FloatWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for floats.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable -->
+ <!-- start class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <class name="FloatWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FloatWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for FloatWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.GenericWritable -->
+ <class name="GenericWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="GenericWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Set the instance that is wrapped.
+
+ @param obj the <code>Writable</code> instance to wrap]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the wrapped instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTypes" return="java.lang.Class[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return all classes that may be wrapped. Subclasses should implement this
+ to return a constant array of classes.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper for Writable instances.
+ <p>
+ When two sequence files, which have the same Key type but different Value
+ types, are mapped out to reduce, multiple Value types are not allowed.
+ In this case, this class can help you wrap instances with different types.
+ </p>
+
+ <p>
+ Compared with <code>ObjectWritable</code>, this class is much more efficient,
+ because <code>ObjectWritable</code> appends the class declaration as a String
+ to the output file in every Key-Value pair.
+ </p>
+
+ <p>
+ Generic Writable implements {@link Configurable} interface, so that it will be
+ configured by the framework. The configuration is passed to the wrapped objects
+ implementing {@link Configurable} interface <i>before deserialization</i>.
+ </p>
+
+ How to use it: <br>
+ 1. Write your own class, such as GenericObject, which extends GenericWritable.<br>
+ 2. Implement the abstract method <code>getTypes()</code>, defining
+ the classes which will be wrapped in GenericObject in the application.
+ Note: the classes defined in the <code>getTypes()</code> method must
+ implement the <code>Writable</code> interface.
+ <br><br>
+
+ The code looks like this:
+ <blockquote><pre>
+ public class GenericObject extends GenericWritable {
+
+ private static Class[] CLASSES = {
+ ClassType1.class,
+ ClassType2.class,
+ ClassType3.class,
+ };
+
+ protected Class[] getTypes() {
+ return CLASSES;
+ }
+
+ }
+ </pre></blockquote>
+
+ @since Nov 8, 2006]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.GenericWritable -->
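+ <!-- A sketch of the usage pattern described above, with a concrete
+      subclass wrapping two real Writable types; the class and value names
+      are illustrative.
+
+ import org.apache.hadoop.io.GenericWritable;
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.io.Writable;
+
+ public class ValueWrapper extends GenericWritable {
+   private static final Class[] CLASSES = { IntWritable.class, Text.class };
+
+   protected Class[] getTypes() {
+     return CLASSES;              // every listed class implements Writable
+   }
+
+   public static void main(String[] argv) {
+     ValueWrapper w = new ValueWrapper();
+     w.set(new IntWritable(5));   // wrap; only a one-byte type id precedes
+                                  // the value when w is serialized
+     Writable inner = w.get();    // unwrap
+     System.out.println(inner);   // 5
+   }
+ }
+ -->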
+ <!-- start class org.apache.hadoop.io.InputBuffer -->
+ <class name="InputBuffer" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link InputStream} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new InputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ InputBuffer buffer = new InputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using InputStream methods ...
+ }
+ </pre>
+ @see DataInputBuffer
+ @see DataOutput]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.InputBuffer -->
+ <!-- start class org.apache.hadoop.io.IntWritable -->
+ <class name="IntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="IntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="IntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an IntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two IntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for ints.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable -->
+ <!-- start class org.apache.hadoop.io.IntWritable.Comparator -->
+ <class name="IntWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IntWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for IntWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.IOUtils -->
+ <class name="IOUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="buffSize" type="int"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param buffSize the size of the buffer
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another. <strong>Closes the input and output streams
+ at the end</strong>.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads len bytes in a loop.
+ @param in The InputStream to read from
+ @param buf The buffer to fill
+ @param off offset into the buffer
+ @param len the number of bytes to read
+ @throws IOException if it could not read the requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Similar to readFully(). Skips bytes in a loop.
+ @param in The InputStream to skip bytes from
+ @param len number of bytes to skip.
+ @throws IOException if it could not skip the requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="closeables" type="java.io.Closeable[]"/>
+ <doc>
+ <![CDATA[Close the Closeable objects and <b>ignore</b> any {@link IOException} or
+ null pointers. Must only be used for cleanup in exception handlers.
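+
+ <p>A sketch of the intended pattern, assuming a Commons Logging
+ <code>LOG</code> and streams <code>in</code> and <code>out</code>:<pre>
+
+ try {
+ ... read from in, write to out ...
+ } catch (IOException e) {
+ // best-effort close; secondary IOExceptions are logged at debug and ignored
+ IOUtils.cleanup(LOG, in, out);
+ throw e;
+ }
+ </pre>
+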
+ @param log the log to record problems to at debug level. Can be null.
+ @param closeables the objects to close]]>
+ </doc>
+ </method>
+ <method name="closeStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Closeable"/>
+ <doc>
+ <![CDATA[Closes the stream ignoring {@link IOException}.
+ Must only be called when cleaning up from exception handlers.
+ @param stream the Stream to close]]>
+ </doc>
+ </method>
+ <method name="closeSocket"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <doc>
+ <![CDATA[Closes the socket, ignoring any {@link IOException}.
+ @param sock the Socket to close]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility class for I/O-related functionality.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils -->
+ <!-- start class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <class name="IOUtils.NullOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils.NullOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[/dev/null of OutputStreams.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <!-- start class org.apache.hadoop.io.LongWritable -->
+ <class name="LongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="LongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a LongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two LongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable -->
+ <!-- start class org.apache.hadoop.io.LongWritable.Comparator -->
+ <class name="LongWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <class name="LongWritable.DecreasingComparator" extends="org.apache.hadoop.io.LongWritable.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.DecreasingComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A decreasing Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <!-- start class org.apache.hadoop.io.MapFile -->
+ <class name="MapFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="oldName" type="java.lang.String"/>
+ <param name="newName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames an existing map directory.]]>
+ </doc>
+ </method>
+ <method name="delete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deletes the named map file.]]>
+ </doc>
+ </method>
+ <method name="fix" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valueClass" type="java.lang.Class"/>
+ <param name="dryrun" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This method attempts to fix a corrupt MapFile by re-creating its index.
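+
+ <p>For example, assuming a FileSystem <code>fs</code> and Configuration
+ <code>conf</code> (the path is illustrative):<pre>
+
+ // report what a repair would do without changing anything (dryrun = true)
+ long valid = MapFile.fix(fs, new Path("/data/broken.map"),
+ IntWritable.class, Text.class, true, conf);
+ </pre>
+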
+ @param fs filesystem
+ @param dir directory containing the MapFile data and index
+ @param keyClass key class (has to be a subclass of Writable)
+ @param valueClass value class (has to be a subclass of Writable)
+ @param dryrun if true, do not perform any changes; just report what needs to be done
+ @return number of valid entries in this MapFile, or -1 if no fixing was needed
+ @throws Exception]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="INDEX_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the index file.]]>
+ </doc>
+ </field>
+ <field name="DATA_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the data file.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A file-based map from keys to values.
+
+ <p>A map is a directory containing two files, the <code>data</code> file,
+ containing all keys and values in the map, and a smaller <code>index</code>
+ file, containing a fraction of the keys. The fraction is determined by
+ {@link Writer#getIndexInterval()}.
+
+ <p>The index file is read entirely into memory. Thus key implementations
+ should try to keep themselves small.
+
+ <p>Map files are created by adding entries in-order. To maintain a large
+ database, perform updates by copying the previous version of the database
+ and merging in a sorted change list, to create a new version of the
+ database in a new file. Sorting large change lists can be done with
+ {@link SequenceFile.Sorter}.
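+
+ <p>A short usage sketch, assuming a FileSystem <code>fs</code> and
+ Configuration <code>conf</code> (the directory name is illustrative):<pre>
+
+ MapFile.Writer writer = new MapFile.Writer(conf, fs, "/tmp/example.map",
+ IntWritable.class, Text.class);
+ writer.append(new IntWritable(1), new Text("one"));
+ writer.append(new IntWritable(2), new Text("two"));
+ writer.close();
+
+ MapFile.Reader reader = new MapFile.Reader(fs, "/tmp/example.map", conf);
+ Text value = new Text();
+ reader.get(new IntWritable(2), value); // value is now "two"
+ reader.close();
+ </pre>]]>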
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile -->
+ <!-- start class org.apache.hadoop.io.MapFile.Reader -->
+ <class name="MapFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map using the named comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Hook to allow subclasses to defer opening streams until further
+ initialization is complete.
+ @see #createDataFileReader(FileSystem, Path, Configuration)]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="open"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dirName" type="java.lang.String"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDataFileReader" return="org.apache.hadoop.io.SequenceFile.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dataFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link SequenceFile.Reader} returned.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Re-positions the reader before its first key.]]>
+ </doc>
+ </method>
+ <method name="midKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the key at approximately the middle of the file.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalKey"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the final key from the file.
+
+ @param key key to read into]]>
+ </doc>
+ </method>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader at the named key or, if no such key exists, at the
+ first entry after the named key. Returns true iff the named key exists
+ in this map.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the map into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ the end of the map.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the value for the named key, or null if none exists.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+ Returns <code>key</code> or, if it does not exist, the first entry
+ after the named key.
+
+ @param key the key that we're trying to find
+ @param val the data value if the key is found
+ @return the key that was the closest match, or null if eof.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <param name="before" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+
+ @param key the key that we're trying to find
+ @param val the data value if the key is found
+ @param before if true, and <code>key</code> does not exist, return
+ the first entry that falls just before <code>key</code>; otherwise,
+ return the record that sorts just after.
+ @return the key that was the closest match, or null if eof.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Reader -->
+ <!-- start class org.apache.hadoop.io.MapFile.Writer -->
+ <class name="MapFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <method name="getIndexInterval" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of entries that are added before an index entry is added.]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval and stores it in conf.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair to the map. The key must be greater than or
+ equal to the previous key added to the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writes a new map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Writer -->
+ <!-- start class org.apache.hadoop.io.MapWritable -->
+ <class name="MapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Map&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapWritable" type="org.apache.hadoop.io.MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.Writable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable map with {@link Writable} keys and values.
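+
+ <p>A short sketch (illustrative):<pre>
+
+ MapWritable map = new MapWritable();
+ map.put(new Text("count"), new IntWritable(42));
+ IntWritable count = (IntWritable) map.get(new Text("count"));
+ </pre>]]>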
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapWritable -->
+ <!-- start class org.apache.hadoop.io.MD5Hash -->
+ <class name="MD5Hash" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash from a hex string.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash with a specified value.]]>
+ </doc>
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs, reads and returns an instance.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Copy the contents of another instance into this instance.]]>
+ </doc>
+ </method>
+ <method name="getDigest" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the digest bytes.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="halfDigest" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a half-sized version of this MD5 that fits in a long.]]>
+ </doc>
+ </method>
+ <method name="quarterDigest" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a 32-bit digest of the MD5.
+ @return the first 4 bytes of the MD5]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an MD5Hash whose digest contains the
+ same values.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for this object.
+ Only uses the first 4 bytes, since MD5 digests are evenly distributed.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares this object with the specified object for order.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="setDigest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the digest value from a hex string.]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Writable for MD5 hash values.
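+
+ <p>A short sketch (illustrative):<pre>
+
+ MD5Hash hash = MD5Hash.digest("hello");
+ String hex = hash.toString(); // hex string form of the digest
+ </pre>]]>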
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash -->
+ <!-- start class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <class name="MD5Hash.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5Hash.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for MD5Hash keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <!-- start class org.apache.hadoop.io.MultipleIOException -->
+ <class name="MultipleIOException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getExceptions" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the underlying exceptions]]>
+ </doc>
+ </method>
+ <method name="createIOException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="exceptions" type="java.util.List&lt;java.io.IOException&gt;"/>
+ <doc>
+ <![CDATA[A convenience method to create an {@link IOException}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Encapsulates a list of {@link IOException}s in a single {@link IOException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MultipleIOException -->
+ <!-- start class org.apache.hadoop.io.NullWritable -->
+ <class name="NullWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <method name="get" return="org.apache.hadoop.io.NullWritable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the single instance of this class.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Singleton Writable with no data.
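+
+ <p>Typically used where a key or value is required but carries no
+ information; for example, assuming a SequenceFile.Writer
+ <code>writer</code> is in scope:<pre>
+
+ writer.append(NullWritable.get(), new Text("value-only record"));
+ </pre>]]>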
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable -->
+ <!-- start class org.apache.hadoop.io.NullWritable.Comparator -->
+ <class name="NullWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator &quot;optimized&quot; for NullWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ObjectWritable -->
+ <class name="ObjectWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ObjectWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Class, java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the instance, or null if none.]]>
+ </doc>
+ </method>
+ <method name="getDeclaredClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the class that this object is declared to be.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Reset the instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeObject"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="instance" type="java.lang.Object"/>
+ <param name="declaredClass" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="objectWritable" type="org.apache.hadoop.io.ObjectWritable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A polymorphic Writable that writes an instance with its class name.
+ Handles arrays, strings and primitive types without a Writable wrapper.
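+
+ <p>A short sketch, assuming a <code>DataOutput out</code> and a
+ <code>Configuration conf</code> are in scope:<pre>
+
+ // writes the class name followed by the instance itself
+ ObjectWritable.writeObject(out, new Text("payload"), Text.class, conf);
+ </pre>]]>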
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ObjectWritable -->
+ <!-- start class org.apache.hadoop.io.OutputBuffer -->
+ <class name="OutputBuffer" extends="java.io.FilterOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.OutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from an InputStream directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link OutputStream} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new OutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ OutputBuffer buffer = new OutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using OutputStream methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>
+ @see DataOutputBuffer
+ @see InputBuffer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.OutputBuffer -->
+ <!-- start interface org.apache.hadoop.io.RawComparator -->
+ <interface name="RawComparator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Comparator&lt;T&gt;"/>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link Comparator} that operates directly on byte representations of
+ objects.
+ </p>
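+
+ <p>A sketch of a raw comparison for fixed-width four-byte integer keys,
+ using the readInt helper from WritableComparator (illustrative):<pre>
+
+ public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
+ int thisValue = WritableComparator.readInt(b1, s1);
+ int thatValue = WritableComparator.readInt(b2, s2);
+ return thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1);
+ }
+ </pre>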
+ @param <T> the type of compared objects
+ @see DeserializerComparator]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.RawComparator -->
+ <!-- start class org.apache.hadoop.io.SequenceFile -->
+ <class name="SequenceFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()}
+ to get {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the compression type for the reduce outputs.
+ @param job the job config to look in
+ @return the kind of compression to use
+ @deprecated Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()}
+ to get {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use the one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.
+ or">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the compression type for sequence files.
+ @param job the configuration to modify
+ @param val the new compression type (none, block, record)
+ @deprecated Use one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for intermediate map-outputs or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
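+
+ <p>A minimal sketch, assuming a FileSystem <code>fs</code> and
+ Configuration <code>conf</code> (the path is illustrative):<pre>
+
+ SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf,
+ new Path("/tmp/data.seq"), IntWritable.class, Text.class);
+ </pre>
+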
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @return the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @return the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param bufferSize buffer size for the underlying output stream.
+ @param replication replication factor for the file.
+ @param blockSize block size for the file.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
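+ <!-- A sketch of the fully-parameterized overload above; the tuning values
+      (buffer size, replication, block size) are purely illustrative, and a
+      null Progressable is assumed to be acceptable:
+
+      SequenceFile.Writer writer = SequenceFile.createWriter(
+          fs, conf, new Path("/tmp/tuned.seq"),
+          Text.class, BytesWritable.class,
+          64 * 1024,                  // bufferSize
+          (short) 3,                  // replication
+          128L * 1024 * 1024,         // blockSize
+          SequenceFile.CompressionType.RECORD,
+          new DefaultCodec(),         // org.apache.hadoop.io.compress.DefaultCodec
+          null,                       // Progressable (none)
+          new SequenceFile.Metadata());
+      writer.close();
+ -->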
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top of which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top of which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
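+ <!-- A sketch of the 'raw' variant above, which layers a writer on an
+      already-open FSDataOutputStream instead of creating the file itself
+      (the null codec is assumed to be acceptable with CompressionType.NONE):
+
+      FSDataOutputStream out = fs.create(new Path("/tmp/raw.seq")); // illustrative path
+      SequenceFile.Writer writer = SequenceFile.createWriter(
+          conf, out, Text.class, Text.class,
+          SequenceFile.CompressionType.NONE, null);
+      writer.append(new Text("k"), new Text("v"));
+      writer.close();   // the caller may still be responsible for closing 'out'
+ -->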
+ <field name="SYNC_INTERVAL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes between sync points.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<code>SequenceFile</code>s are flat files consisting of binary key/value
+ pairs.
+
+ <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
+ {@link Sorter} classes for writing, reading and sorting respectively.</p>
+
+ There are three <code>SequenceFile</code> <code>Writer</code>s based on the
+ {@link CompressionType} used to compress key/value pairs:
+ <ol>
+ <li>
+ <code>Writer</code> : Uncompressed records.
+ </li>
+ <li>
+ <code>RecordCompressWriter</code> : Record-compressed files, only compress
+ values.
+ </li>
+ <li>
+ <code>BlockCompressWriter</code> : Block-compressed files, both keys &amp;
+                                    values are collected in 'blocks'
+                                    separately and compressed. The size of
+                                    the 'block' is configurable.
+ </li>
+ </ol>
+
+ <p>The actual compression algorithm used to compress key and/or values can be
+ specified by using the appropriate {@link CompressionCodec}.</p>
+
+ <p>The recommended way is to use the static <tt>createWriter</tt> methods
+ provided by the <code>SequenceFile</code> to choose the preferred format.</p>
+
+ <p>The {@link Reader} acts as the bridge and can read any of the above
+ <code>SequenceFile</code> formats.</p>
+
+ <h4 id="Formats">SequenceFile Formats</h4>
+
+ <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
+ depending on the <code>CompressionType</code> specified. All of them share a
+ <a href="#Header">common header</a> described below.</p>
+
+ <h5 id="Header">SequenceFile Header</h5>
+ <ul>
+ <li>
+ version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual
+ version number (e.g. SEQ4 or SEQ6)
+ </li>
+ <li>
+ keyClassName - key class
+ </li>
+ <li>
+ valueClassName - value class
+ </li>
+ <li>
+ compression - A boolean which specifies if compression is turned on for
+ keys/values in this file.
+ </li>
+ <li>
+ blockCompression - A boolean which specifies if block-compression is
+ turned on for keys/values in this file.
+ </li>
+ <li>
+ compression codec - <code>CompressionCodec</code> class which is used for
+ compression of keys and/or values (if compression is
+ enabled).
+ </li>
+ <li>
+ metadata - {@link Metadata} for this file.
+ </li>
+ <li>
+ sync - A sync marker to denote end of the header.
+ </li>
+ </ul>
+
+ <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li>Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li><i>Compressed</i> Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record <i>Block</i>
+ <ul>
+ <li>Compressed key-lengths block-size</li>
+ <li>Compressed key-lengths block</li>
+ <li>Compressed keys block-size</li>
+ <li>Compressed keys block</li>
+ <li>Compressed value-lengths block-size</li>
+ <li>Compressed value-lengths block</li>
+ <li>Compressed values block-size</li>
+ <li>Compressed values block</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <p>The compressed blocks of key lengths and value lengths consist of the
+ actual lengths of individual keys/values encoded in ZeroCompressedInteger
+ format.</p>
+
+ @see CompressionCodec]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile -->
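+ <!-- A sketch of inspecting the header fields described above through the
+      Reader accessors documented later in this file (path illustrative):
+
+      SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path("/tmp/data.seq"), conf);
+      System.out.println("key class:        " + r.getKeyClassName());
+      System.out.println("value class:      " + r.getValueClassName());
+      System.out.println("compressed:       " + r.isCompressed());
+      System.out.println("block compressed: " + r.isBlockCompressed());
+      System.out.println("codec:            " + r.getCompressionCodec());
+      r.close();
+ -->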
+ <!-- start class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <class name="SequenceFile.CompressionType" extends="java.lang.Enum&lt;org.apache.hadoop.io.SequenceFile.CompressionType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.SequenceFile.CompressionType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression type used to compress key/value pairs in the
+ {@link SequenceFile}.
+
+ @see SequenceFile.Writer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.Metadata -->
+ <class name="SequenceFile.Metadata" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFile.Metadata" type="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="getMetadata" return="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The class encapsulating the metadata of a file.
+ The metadata of a file is a list of attribute name/value
+ pairs of Text type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Metadata -->
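+ <!-- A sketch of attaching Metadata at write time and reading it back via
+      Reader.getMetadata(); the attribute name/value and path are illustrative,
+      and the null codec/Progressable arguments are assumed acceptable:
+
+      SequenceFile.Metadata meta = new SequenceFile.Metadata();
+      meta.set(new Text("created.by"), new Text("example"));
+      SequenceFile.Writer w = SequenceFile.createWriter(
+          fs, conf, new Path("/tmp/meta.seq"), Text.class, Text.class,
+          SequenceFile.CompressionType.NONE, null, null, meta);
+      w.close();
+
+      SequenceFile.Reader r = new SequenceFile.Reader(fs, new Path("/tmp/meta.seq"), conf);
+      Text createdBy = r.getMetadata().get(new Text("created.by"));
+      r.close();
+ -->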
+ <!-- start class org.apache.hadoop.io.SequenceFile.Reader -->
+ <class name="SequenceFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Reader" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the named file.]]>
+ </doc>
+ </constructor>
+ <method name="openFile" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link FSDataInputStream} returned.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the key class.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the value class.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="isCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if values are compressed.]]>
+ </doc>
+ </method>
+ <method name="isBlockCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if records are block-compressed.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="getMetadata" return="org.apache.hadoop.io.SequenceFile.Metadata"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the metadata object of the file.]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file into <code>key</code>, skipping its
+ value. Returns true if another entry exists, and false at end of file.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the file into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ end of file.]]>
+ </doc>
+ </method>
+ <method name="next" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.">
+ <param name="buffer" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.]]>
+ </doc>
+ </method>
+ <method name="createValueBytes" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRaw" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' records.
+ @param key - The buffer into which the key is read
+ @param val - The 'raw' value
+ @return Returns the total record length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawKey" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' keys.
+ @param key - The buffer into which the key is read
+ @return Returns the key length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file, skipping its
+ value. Return null at end of file.]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' values.
+ @param val - The 'raw' value
+ @return Returns the value length
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the current byte position in the input file.
+
+ <p>The position passed must be a position returned by {@link
+ SequenceFile.Writer#getLength()} when writing this file. To seek to an arbitrary
+ position, use {@link SequenceFile.Reader#sync(long)}.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the next sync mark past a given position.]]>
+ </doc>
+ </method>
+ <method name="syncSeen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true iff the previous call to next passed a sync mark.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current byte position in the input file.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reads key/value pairs from a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Reader -->
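+ <!-- A minimal read loop over a sequence file, assuming it was written with
+      Text keys and IntWritable values (path illustrative):
+
+      SequenceFile.Reader reader =
+          new SequenceFile.Reader(fs, new Path("/tmp/data.seq"), conf);
+      Text key = new Text();
+      IntWritable val = new IntWritable();
+      while (reader.next(key, val)) {   // returns false at end of file
+          System.out.println(key + "\t" + val);
+      }
+      reader.close();
+ -->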
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter -->
+ <class name="SequenceFile.Sorter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge files containing the named classes.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.io.RawComparator, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge using an arbitrary {@link RawComparator}.]]>
+ </doc>
+ </constructor>
+ <method name="setFactor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="factor" type="int"/>
+ <doc>
+ <![CDATA[Set the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="getFactor" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="setMemory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="memory" type="int"/>
+ <doc>
+ <![CDATA[Set the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="getMemory" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setProgressable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progressable" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Set the progressable object in order to report progress.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files into an output file.
+ @param inFiles the files to be sorted
+ @param outFile the sorted output file
+ @param deleteInput should the input files be deleted as they are read?]]>
+ </doc>
+ </method>
+ <method name="sortAndIterate" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files and return an iterator.
+ @param inFiles the files to be sorted
+ @param tempDir the directory where temp files are created during sort
+ @param deleteInput should the input files be deleted as they are read?
+ @return the RawKeyValueIterator]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The backwards compatible interface to sort.
+ @param inFile the input file to sort
+ @param outFile the sorted output file]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="segments" type="java.util.List&lt;org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor&gt;"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the list of segments of type <code>SegmentDescriptor</code>.
+ @param segments the list of SegmentDescriptors
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[] using a max factor value
+ that is already set.
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="factor" type="int"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[].
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param factor the factor that will be used as the maximum merge fan-in
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInputs" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of files passed in Path[].
+ @param inNames the array of path names
+ @param tempDir the directory for creating temp files during merge
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cloneFileAttributes" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="prog" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clones the attributes (like compression) of the input file and creates a
+ corresponding Writer.
+ @param inputFile the path of the input file whose attributes should be
+ cloned
+ @param outputFile the path of the output file
+ @param prog the Progressable to report status during the file write
+ @return Writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="records" type="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"/>
+ <param name="writer" type="org.apache.hadoop.io.SequenceFile.Writer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes records from RawKeyValueIterator into a file represented by the
+ passed writer.
+ @param records the RawKeyValueIterator
+ @param writer the Writer created earlier
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merge the provided files.
+ @param inFiles the array of input path names
+ @param outFile the final output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sorts key/value pairs in a sequence-format file.
+
+ <p>For best performance, applications should make sure that the {@link
+ Writable#readFields(DataInput)} implementation of their keys is
+ very efficient. In particular, it should avoid allocating memory.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter -->
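+ <!-- A sketch of sorting sequence files with the Sorter above (paths
+      illustrative); keys must be WritableComparable, as Text is here:
+
+      SequenceFile.Sorter sorter =
+          new SequenceFile.Sorter(fs, Text.class, IntWritable.class, conf);
+      sorter.setFactor(10);   // merge up to 10 streams at once
+      sorter.sort(new Path[] { new Path("/tmp/in.seq") },
+                  new Path("/tmp/sorted.seq"),
+                  false);     // keep the input files
+ -->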
+ <!-- start interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
+ <interface name="SequenceFile.Sorter.RawKeyValueIterator" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw key.
+ @return DataOutputBuffer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getValue" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw value.
+ @return ValueBytes
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets up the current key and value (for getKey and getValue).
+ @return true if there exists a key/value, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the iterator so that the underlying streams can be closed.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the Progress object; this has a float (0.0 - 1.0)
+ indicating the bytes processed by the iterator so far.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to iterate over raw keys/values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
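+ <!-- A sketch of consuming raw sorted records through the iterator above,
+      obtained from Sorter.sortAndIterate (sorter and paths as in the earlier
+      illustrative Sorter sketch):
+
+      SequenceFile.Sorter.RawKeyValueIterator it =
+          sorter.sortAndIterate(new Path[] { new Path("/tmp/in.seq") },
+                                new Path("/tmp/sort-tmp"), false);
+      while (it.next()) {
+          DataOutputBuffer key = it.getKey();          // raw key bytes
+          SequenceFile.ValueBytes val = it.getValue(); // raw value bytes
+          // process key.getData() up to key.getLength(), and val, here
+      }
+      it.close();   // lets the underlying streams be closed
+ -->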
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <class name="SequenceFile.Sorter.SegmentDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="SequenceFile.Sorter.SegmentDescriptor" type="long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a segment.
+ @param segmentOffset the offset of the segment in the file
+ @param segmentLength the length of the segment
+ @param segmentPathName the path name of the file containing the segment]]>
+ </doc>
+ </constructor>
+ <method name="doSync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do the sync checks.]]>
+ </doc>
+ </method>
+ <method name="preserveInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="preserve" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the input file should be preserved (i.e. not deleted) when it is no longer needed.]]>
+ </doc>
+ </method>
+ <method name="shouldPreserveInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRawKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the rawKey object with the key returned by the Reader.
+ @return true if there is a key returned; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rawValue" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the passed rawValue with the value corresponding to the key
+ read earlier.
+ @param rawValue the 'raw' value object to fill
+ @return the length of the value
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the stored rawKey.]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The default cleanup. Subclasses can override this with a custom
+ cleanup.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class defines a merge segment. This class can be subclassed to
+ provide a customized cleanup method implementation. In this
+ implementation, cleanup closes the file handle and deletes the file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <!-- start interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
+ <interface name="SequenceFile.ValueBytes" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the uncompressed bytes to the outStream.
+ @param outStream : Stream to write uncompressed bytes into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to outStream.
+ Note that it will NOT compress the bytes if they are not already compressed.
+ @param outStream : Stream to write compressed bytes into.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Size of stored data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to 'raw' values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
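+ <!-- A sketch of the raw copy pattern these 'raw' types enable: moving
+      records between files without deserializing keys or values. Assumes a
+      reader and a writer created with matching key/value classes:
+
+      DataOutputBuffer keyBuf = new DataOutputBuffer();
+      SequenceFile.ValueBytes rawVal = reader.createValueBytes();
+      while (reader.nextRaw(keyBuf, rawVal) >= 0) {   // -1 at end of file
+          writer.appendRaw(keyBuf.getData(), 0, keyBuf.getLength(), rawVal);
+          keyBuf.reset();                             // reuse the key buffer
+      }
+ -->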
+ <!-- start class org.apache.hadoop.io.SequenceFile.Writer -->
+ <class name="SequenceFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, int, short, long, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a sync point.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="appendRaw"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keyData" type="byte[]"/>
+ <param name="keyOffset" type="int"/>
+ <param name="keyLength" type="int"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current length of the output file.
+
+ <p>This always returns a synchronized position. In other words,
+ immediately after calling {@link SequenceFile.Reader#seek(long)} with a position
+ returned by this method, {@link SequenceFile.Reader#next(Writable)} may be called. However,
+ the key may be earlier in the file than the key last written when this
+ method was called (e.g., with block-compression, it may be the first key
+ in the block that was being written when this method was called).]]>
+ </doc>
+ </method>
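+ <!-- A sketch of the seek contract described above: a position recorded via
+      Writer.getLength() can later be handed to Reader.seek() (writer, fs,
+      conf and path as in the earlier illustrative sketches):
+
+      long pos = writer.getLength();   // a synchronized position
+      writer.append(new Text("k"), new Text("v"));
+      writer.close();
+
+      SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
+      reader.seek(pos);                     // valid: pos came from getLength()
+      reader.next(new Text(), new Text()); // reads from the sync point onwards
+ -->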
+ <field name="keySerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="uncompressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="compressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Write key/value pairs to a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Writer -->
+ <!-- start class org.apache.hadoop.io.SetFile -->
+ <class name="SetFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A file-based set of keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile -->
+ <!-- start class org.apache.hadoop.io.SetFile.Reader -->
+ <class name="SetFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set using the named comparator.]]>
+ </doc>
+ </constructor>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in a set into <code>key</code>. Returns
+ true if such a key exists and false when at the end of the set.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the matching key from a set into <code>key</code>.
+ Returns <code>key</code>, or null if no match exists.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Reader -->
+ <!-- start class org.apache.hadoop.io.SetFile.Writer -->
+ <class name="SetFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="pass a Configuration too">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named set for keys of the named class.
+ @deprecated pass a Configuration too]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element class and compression type.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element comparator and compression type.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key to a set. The key must be strictly greater than the
+ previous key added to the set.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Writer -->
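+ <!-- A sketch of writing and probing a SetFile via the non-deprecated
+      constructors above (directory name illustrative); keys must be appended
+      in strictly increasing order:
+
+      SetFile.Writer w = new SetFile.Writer(conf, fs, "/tmp/example.set",
+          Text.class, SequenceFile.CompressionType.NONE);
+      w.append(new Text("a"));
+      w.append(new Text("b"));
+      w.close();   // close() is assumed to be inherited from MapFile.Writer
+
+      SetFile.Reader r = new SetFile.Reader(fs, "/tmp/example.set", conf);
+      boolean present = r.seek(new Text("b"));   // true if 'b' is in the set
+      r.close();
+ -->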
+ <!-- start class org.apache.hadoop.io.SortedMapWritable -->
+ <class name="SortedMapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="SortedMapWritable" type="org.apache.hadoop.io.SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="comparator" return="java.util.Comparator&lt;? super org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="firstKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="headMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="lastKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="subMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="tailMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.WritableComparable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable SortedMap.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SortedMapWritable -->
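+  <!-- Illustrative sketch, not part of the generated API description: a
+       SortedMapWritable keeps its WritableComparable keys ordered, so the
+       SortedMap operations above behave as on any java.util.SortedMap.
+
+       SortedMapWritable map = new SortedMapWritable();
+       map.put(new IntWritable(3), new Text("three"));
+       map.put(new IntWritable(1), new Text("one"));
+       map.put(new IntWritable(2), new Text("two"));
+       System.out.println(map.firstKey());   // 1
+       System.out.println(map.lastKey());    // 3
+  -->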
+ <!-- start interface org.apache.hadoop.io.Stringifier -->
+ <interface name="Stringifier" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Converts the object to a string representation.
+ @param obj the object to convert
+ @return the string representation of the object
+ @throws IOException if the object cannot be converted]]>
+ </doc>
+ </method>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from its string representation.
+ @param str the string representation of the object
+ @return restored object
+ @throws IOException if the object cannot be restored]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes this object.
+ @throws IOException if an I/O error occurs]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[The Stringifier interface offers two methods: one to convert an object
+ to a string representation, and one to restore the object from its
+ string representation.
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Stringifier -->
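+  <!-- Illustrative sketch, not part of the generated API description: a minimal
+       Stringifier implementation for IntWritable, showing the contract of
+       toString()/fromString(). In practice the generic DefaultStringifier in this
+       package would usually be used instead.
+
+       public class IntStringifier implements Stringifier<IntWritable> {
+         public String toString(IntWritable obj) throws IOException {
+           return Integer.toString(obj.get());
+         }
+         public IntWritable fromString(String str) throws IOException {
+           try {
+             return new IntWritable(Integer.parseInt(str));
+           } catch (NumberFormatException e) {
+             throw new IOException("cannot restore object from: " + str);
+           }
+         }
+         public void close() throws IOException {
+           // no resources to release in this sketch
+         }
+       }
+  -->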
+ <!-- start class org.apache.hadoop.io.Text -->
+ <class name="Text" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Text" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a string.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="org.apache.hadoop.io.Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another text.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a byte array.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the raw bytes; however, only data up to {@link #getLength()} is
+ valid.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the number of bytes in the byte array.]]>
+ </doc>
+ </method>
+ <method name="charAt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="int"/>
+ <doc>
+      <![CDATA[Returns the Unicode Scalar Value (32-bit integer value)
+ for the character at <code>position</code>. Note that this
+ method avoids using the converter or doing String instantiation.
+ @return the Unicode scalar value at position, or -1
+ if the position is invalid or points to a
+ trailing byte]]>
+ </doc>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ <param name="start" type="int"/>
+ <doc>
+      <![CDATA[Finds any occurrence of <code>what</code> in the backing
+ buffer, starting at position <code>start</code>. The starting
+ position is measured in bytes and the return value is in
+ terms of byte position in the buffer. The backing buffer is
+ not converted to a string for this operation.
+ @return byte position of the first occurrence of the search
+ string in the UTF-8 buffer or -1 if not found]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <doc>
+      <![CDATA[Set to a UTF-8 byte array.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Text"/>
+ <doc>
+      <![CDATA[Copy a text.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+      <![CDATA[Set the Text to a range of bytes.
+ @param utf8 the data to copy from
+ @param start the first position of the new string
+ @param len the number of bytes of the new string]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Append a range of bytes to the end of the given text
+ @param utf8 the data to copy from
+ @param start the first position to append from utf8
+ @param len the number of bytes to append]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clear the string to empty.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Convert the text back to a String.
+ @see java.lang.Object#toString()]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Deserialize this Text from <code>in</code>.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one Text in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Serialize: write this object to <code>out</code>.
+ The length is written using zero-compressed encoding.
+ @see Writable#write(DataOutput)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare two Texts bytewise using standard UTF8 ordering.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a Text with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Hash function.]]>
+ </doc>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If the input is malformed,
+ it is replaced by the substitution character (U+FFFD).]]>
+ </doc>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If the input is malformed,
+ invalid characters are replaced by the substitution character (U+FFFD).
+ @return ByteBuffer: bytes are stored at ByteBuffer.array()
+ and the length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.
+ @return ByteBuffer: bytes are stored at ByteBuffer.array()
+ and the length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read a UTF-8 encoded string from <code>in</code>.]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Write a UTF-8 encoded string to <code>out</code>.]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+      <![CDATA[Check if a byte array contains valid UTF-8.
+ @param utf8 byte array
+ @throws MalformedInputException if the byte array contains invalid UTF-8]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+      <![CDATA[Check to see if a byte array contains valid UTF-8.
+ @param utf8 the array of bytes
+ @param start the offset of the first byte in the array
+ @param len the length of the byte sequence
+ @throws MalformedInputException if the byte array contains invalid bytes]]>
+ </doc>
+ </method>
+ <method name="bytesToCodePoint" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="java.nio.ByteBuffer"/>
+ <doc>
+ <![CDATA[Returns the next code point at the current position in
+ the buffer. The buffer's position will be incremented.
+ Any mark set on this buffer will be changed by this method!]]>
+ </doc>
+ </method>
+ <method name="utf8Length" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[For the given string, returns the number of UTF-8 bytes
+ required to encode the string.
+ @param string text to encode
+ @return number of UTF-8 bytes required to encode]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class stores text using standard UTF8 encoding. It provides methods
+ to serialize, deserialize, and compare texts at byte level. The type of
+ length is integer and is serialized using zero-compressed format. <p>In
+ addition, it provides methods for string traversal without converting the
+ byte array to a string. <p>Also includes utilities for
+ serializing/deserializing a string, coding/decoding a string, checking if a
+ byte array contains valid UTF8 code, and calculating the length of an encoded
+ string.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text -->
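+  <!-- Illustrative sketch, not part of the generated API description: byte-level
+       access to a Text, assuming it runs inside a method that declares
+       CharacterCodingException. Only the first getLength() bytes of getBytes()
+       are valid.
+
+       Text t = new Text("café");                // 'é' encodes as two UTF-8 bytes
+       System.out.println(t.getLength());        // 5 bytes for 4 characters
+       System.out.println(t.charAt(3));          // 233, the scalar value of 'é'
+       System.out.println(t.charAt(4));          // -1: position 4 is a trailing byte
+       System.out.println(t.find("fé"));         // 2, a byte position
+
+       byte[] valid = new byte[t.getLength()];
+       System.arraycopy(t.getBytes(), 0, valid, 0, t.getLength());
+       System.out.println(Text.decode(valid));   // back to "café"
+  -->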
+ <!-- start class org.apache.hadoop.io.Text.Comparator -->
+ <class name="Text.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Text.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for Text keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text.Comparator -->
+ <!-- start class org.apache.hadoop.io.TwoDArrayWritable -->
+ <class name="TwoDArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[][]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[][]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for 2D arrays containing a matrix of instances of a class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.TwoDArrayWritable -->
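+  <!-- Illustrative sketch, not part of the generated API description: a
+       TwoDArrayWritable holding a 2x2 matrix of IntWritables. The value class
+       passed to the constructor is what readFields() instantiates when
+       deserializing.
+
+       TwoDArrayWritable m = new TwoDArrayWritable(IntWritable.class);
+       m.set(new Writable[][] {
+         { new IntWritable(1), new IntWritable(2) },
+         { new IntWritable(3), new IntWritable(4) }
+       });
+       Writable[][] rows = m.get();              // the same values back
+  -->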
+ <!-- start class org.apache.hadoop.io.UTF8 -->
+ <class name="UTF8" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="replaced by Text">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UTF8" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <constructor name="UTF8" type="org.apache.hadoop.io.UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Construct from another UTF8.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw bytes.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the encoded string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+      <![CDATA[Set to contain the contents of another UTF8.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one UTF8 in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare two UTF8s.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert to a String.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a UTF8 with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convert a string to a UTF-8 encoded byte array.
+ @see String#getBytes(String)]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string.
+
+ @see DataInput#readUTF()]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF-8 encoded string.
+
+ @see DataOutput#writeUTF(String)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for strings that uses the UTF8 encoding.
+
+ <p>Also includes utilities for efficiently reading and writing UTF-8.
+
+ @deprecated replaced by Text]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8 -->
+ <!-- start class org.apache.hadoop.io.UTF8.Comparator -->
+ <class name="UTF8.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UTF8.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8.Comparator -->
+ <!-- start class org.apache.hadoop.io.VersionedWritable -->
+ <class name="VersionedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="VersionedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="byte"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the version number of the current implementation.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for Writables that provides version checking.
+
+ <p>This is useful when a class may evolve, so that instances written by the
+ old version of the class may still be processed by the new version. To
+ handle this situation, {@link #readFields(DataInput)}
+ implementations should catch {@link VersionMismatchException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionedWritable -->
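+  <!-- Illustrative sketch, not part of the generated API description: a
+       hypothetical VersionedWritable subclass. The base class writes the version
+       byte in write() and checks it in readFields(), throwing
+       VersionMismatchException on disagreement, so subclasses call super first.
+
+       public class MyRecord extends VersionedWritable {
+         private static final byte VERSION = 1;
+         private int value;
+
+         public byte getVersion() { return VERSION; }
+
+         public void write(DataOutput out) throws IOException {
+           super.write(out);                     // writes the version byte
+           out.writeInt(value);
+         }
+
+         public void readFields(DataInput in) throws IOException {
+           super.readFields(in);                 // throws VersionMismatchException on mismatch
+           value = in.readInt();
+         }
+       }
+  -->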
+ <!-- start class org.apache.hadoop.io.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="byte, byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Thrown by {@link VersionedWritable#readFields(DataInput)} when the
+ version of an object being read does not match the current implementation
+ version as returned by {@link VersionedWritable#getVersion()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionMismatchException -->
+ <!-- start class org.apache.hadoop.io.VIntWritable -->
+ <class name="VIntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VIntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VIntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VIntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VIntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for integer values stored in variable-length format.
+ Such values take between one and five bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVInt(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VIntWritable -->
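+  <!-- Illustrative sketch, not part of the generated API description: the
+       variable-length format in action, assuming it runs inside a method that
+       declares IOException.
+
+       java.io.ByteArrayOutputStream buf = new java.io.ByteArrayOutputStream();
+       java.io.DataOutputStream out = new java.io.DataOutputStream(buf);
+       new VIntWritable(100).write(out);         // small value: a single byte
+       System.out.println(buf.size());           // 1
+       buf.reset();
+       new VIntWritable(1000000).write(out);     // length byte plus three data bytes
+       System.out.println(buf.size());           // 4
+  -->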
+ <!-- start class org.apache.hadoop.io.VLongWritable -->
+ <class name="VLongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VLongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VLongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+      <![CDATA[Set the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VLongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VLongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs in a variable-length format. Such values take
+ between one and nine bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVLong(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VLongWritable -->
+ <!-- start interface org.apache.hadoop.io.Writable -->
+ <interface name="Writable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the fields of this object to <code>out</code>.
+
+ @param out <code>DataOutput</code> to serialize this object into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the fields of this object from <code>in</code>.
+
+ <p>For efficiency, implementations should attempt to re-use storage in the
+ existing object where possible.</p>
+
+ @param in <code>DataInput</code> to deserialize this object from.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A serializable object which implements a simple, efficient, serialization
+ protocol, based on {@link DataInput} and {@link DataOutput}.
+
+ <p>Any <code>key</code> or <code>value</code> type in the Hadoop Map-Reduce
+ framework implements this interface.</p>
+
+ <p>Implementations typically implement a static <code>read(DataInput)</code>
+ method which constructs a new instance, calls {@link #readFields(DataInput)}
+ and returns the instance.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritable implements Writable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public static MyWritable read(DataInput in) throws IOException {
+ MyWritable w = new MyWritable();
+ w.readFields(in);
+ return w;
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Writable -->
+ <!-- start interface org.apache.hadoop.io.WritableComparable -->
+ <interface name="WritableComparable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable&lt;T&gt;"/>
+ <doc>
+ <![CDATA[A {@link Writable} which is also {@link Comparable}.
+
+ <p><code>WritableComparable</code>s can be compared to each other, typically
+ via <code>Comparator</code>s. Any type which is to be used as a
+ <code>key</code> in the Hadoop Map-Reduce framework should implement this
+ interface.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritableComparable implements WritableComparable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+    public int compareTo(MyWritableComparable w) {
+      int thisValue = this.counter;
+      int thatValue = w.counter;
+      return (thisValue &lt; thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
+    }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableComparable -->
+ <!-- start class org.apache.hadoop.io.WritableComparator -->
+ <class name="WritableComparator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator"/>
+ <constructor name="WritableComparator" type="java.lang.Class"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </constructor>
+ <constructor name="WritableComparator" type="java.lang.Class, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get a comparator for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link WritableComparable}
+ implementation.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the WritableComparable implementation class.]]>
+ </doc>
+ </method>
+ <method name="newKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a new {@link WritableComparable} instance.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+      <![CDATA[Optimization hook. Override this to make SequenceFile.Sorter scream.
+
+ <p>The default implementation reads the data into two {@link
+ WritableComparable}s (using {@link
+ Writable#readFields(DataInput)}), then calls {@link
+ #compare(WritableComparable,WritableComparable)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[Compare two WritableComparables.
+
+ <p> The default implementation uses the natural ordering, calling {@link
+ Comparable#compareTo(Object)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <method name="hashBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Compute hash for binary data.]]>
+ </doc>
+ </method>
+ <method name="readUnsignedShort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an unsigned short from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an integer from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a long from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array containing the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator for {@link WritableComparable}s.
+
+ <p>This base implementation uses the natural ordering. To define alternate
+ orderings, override {@link #compare(WritableComparable,WritableComparable)}.
+
+ <p>One may optimize compare-intensive operations by overriding
+ {@link #compare(byte[],int,int,byte[],int,int)}. Static utility methods are
+ provided to assist in optimized implementations of this method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableComparator -->
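+  <!-- Illustrative sketch, not part of the generated API description: an
+       optimized raw comparator for a hypothetical fixed-width key (MyKey) whose
+       first four bytes are a serialized int. compare() never deserializes the
+       keys; readInt() is the static helper documented above.
+
+       public class MyKeyComparator extends WritableComparator {
+         public MyKeyComparator() {
+           super(MyKey.class);
+         }
+         public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
+           int i1 = readInt(b1, s1);
+           int i2 = readInt(b2, s2);
+           return i1 < i2 ? -1 : (i1 == i2 ? 0 : 1);
+         }
+       }
+
+       // Registered once, typically from a static initializer of MyKey:
+       // static { WritableComparator.define(MyKey.class, new MyKeyComparator()); }
+  -->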
+ <!-- start class org.apache.hadoop.io.WritableFactories -->
+ <class name="WritableFactories" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="factory" type="org.apache.hadoop.io.WritableFactory"/>
+ <doc>
+ <![CDATA[Define a factory for a class.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.io.WritableFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+      <![CDATA[Return the factory defined for a class.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factories for non-public writables. Defining a factory permits {@link
+ ObjectWritable} to construct instances of non-public classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableFactories -->
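+  <!-- Illustrative sketch, not part of the generated API description: making a
+       hypothetical non-public Writable (PackagePrivateRecord) constructible by
+       ObjectWritable via a factory.
+
+       WritableFactories.setFactory(PackagePrivateRecord.class,
+           new WritableFactory() {
+             public Writable newInstance() { return new PackagePrivateRecord(); }
+           });
+
+       Writable w = WritableFactories.newInstance(PackagePrivateRecord.class);
+  -->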
+ <!-- start interface org.apache.hadoop.io.WritableFactory -->
+ <interface name="WritableFactory" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a new instance.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A factory for a class of Writable.
+ @see WritableFactories]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableFactory -->
+ <!-- start class org.apache.hadoop.io.WritableName -->
+ <class name="WritableName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the name that a class should be known as to something other than the
+ class name.]]>
+ </doc>
+ </method>
+ <method name="addName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add an alternate name for a class.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Return the name for a class. Default is {@link Class#getName()}.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the class for a name. Default is {@link Class#forName(String)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility to permit renaming of Writable implementation classes without
+ invalidating files that contain their class name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableName -->
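+  <!-- Illustrative sketch, not part of the generated API description: keeping
+       files readable across a rename. NewRecord and the old class name are
+       hypothetical; the calls would typically run once at startup.
+
+       // Record files under a stable logical name instead of the class name:
+       WritableName.setName(NewRecord.class, "example.Record");
+       // Files written when the class was still com.example.OldRecord also resolve:
+       WritableName.addName(NewRecord.class, "com.example.OldRecord");
+  -->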
+ <!-- start class org.apache.hadoop.io.WritableUtils -->
+ <class name="WritableUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="WritableUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readCompressedByteArray" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skipCompressedByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedByteArray" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="bytes" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="displayByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="record" type="byte[]"/>
+ </method>
+ <method name="clone" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="orig" type="org.apache.hadoop.io.Writable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Make a copy of a writable object using serialization to a buffer.
+ @param orig The object to copy
+ @param conf the configuration used to initialize the copy
+ @return The copied object]]>
+ </doc>
+ </method>
+ <method name="cloneInto"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.io.Writable"/>
+ <param name="src" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Make a copy of the writable object using serialization to a buffer.
+ @param dst the object to copy into, whose previous contents are destroyed
+ @param src the object to copy from
+ @throws IOException]]>
+ </doc>
+ </method>
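+  <!-- A short sketch of both copy helpers (assumes org.apache.hadoop.io.Text and a
+       Configuration in scope; clone returns Writable here, hence the cast):
+
+         Configuration conf = new Configuration();
+         Text src = new Text("hello");
+         Text copy = (Text) WritableUtils.clone(src, conf); // deep copy via a buffer
+         Text dst = new Text("overwritten");
+         WritableUtils.cloneInto(dst, src);                 // dst now equals src; throws IOException
+  -->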
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an integer to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ integer is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following integer
+ is positive, and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following integer
+ is negative, and the number of bytes that follow is -(v+120). Bytes are
+ stored in high-non-zero-byte-first order, the same encoding used for vlongs.
+
+ @param stream Binary output stream
+ @param i Integer to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ long is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following long
+ is positive, and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following long
+ is negative, and the number of bytes that follow is -(v+120). Bytes are
+ stored in high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
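+  <!-- A quick illustration of the encoded sizes described above (assumes
+       java.io.ByteArrayOutputStream/DataOutputStream; sizes follow the marker
+       rules in the javadoc and can be cross-checked with getVIntSize):
+
+         ByteArrayOutputStream buf = new ByteArrayOutputStream();
+         DataOutputStream out = new DataOutputStream(buf);
+         WritableUtils.writeVInt(out, 100);      // 1 byte: value fits in [-112, 127]
+         WritableUtils.writeVInt(out, 300);      // 3 bytes: marker + 2 magnitude bytes
+         WritableUtils.writeVLong(out, -1000L);  // 3 bytes: negative marker + 2 bytes
+  -->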
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized long from stream.]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized integer from stream.]]>
+ </doc>
+ </method>
+ <method name="isNegativeVInt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+      <![CDATA[Given the first byte of a vint/vlong, determine the sign.
+ @param value the first byte
+ @return true if the encoded value is negative]]>
+ </doc>
+ </method>
+ <method name="decodeVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+      <![CDATA[Parse the first byte of a vint/vlong to determine the number of bytes.
+ @param value the first byte of the vint/vlong
+ @return the total number of bytes (1 to 9)]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+      <![CDATA[Get the encoded length when an integer is stored in a variable-length format.
+ @param i the value to measure
+ @return the encoded length]]>
+ </doc>
+ </method>
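+  <!-- Inspecting an encoded value by its first byte alone (a sketch; in is any
+       java.io.DataInput positioned at the start of a vint/vlong):
+
+         byte first = in.readByte();
+         int total = WritableUtils.decodeVIntSize(first);   // total bytes, 1 to 9
+         boolean neg = WritableUtils.isNegativeVInt(first); // sign from the marker byte
+  -->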
+ <method name="readEnum" return="T extends java.lang.Enum&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="enumType" type="java.lang.Class&lt;T&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read an Enum value from DataInput. Enums are read and written
+ using their String values.
+ @param <T> Enum type
+ @param in DataInput to read from
+ @param enumType Class type of Enum
+ @return Enum represented by String read from DataInput
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeEnum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="enumVal" type="java.lang.Enum"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Writes the String value of an enum to DataOutput.
+ @param out DataOutput stream
+ @param enumVal enum value
+ @throws IOException]]>
+ </doc>
+ </method>
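+  <!-- Enum round trip (a sketch; java.util.concurrent.TimeUnit stands in for any
+       enum type, since enums travel as their String names):
+
+         WritableUtils.writeEnum(out, TimeUnit.SECONDS);             // stored as "SECONDS"
+         TimeUnit unit = WritableUtils.readEnum(in, TimeUnit.class); // back to the constant
+  -->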
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Skip <i>len</i> bytes in input stream <i>in</i>.
+ @param in input stream
+ @param len number of bytes to skip
+ @throws IOException if fewer than <i>len</i> bytes could be skipped]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableUtils -->
+</package>
+<package name="org.apache.hadoop.io.compress">
+ <!-- start class org.apache.hadoop.io.compress.CodecPool -->
+ <class name="CodecPool" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CodecPool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Compressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Compressor</code>
+ @return <code>Compressor</code> for the given
+ <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="getDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Decompressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Decompressor</code>
+ @return <code>Decompressor</code> for the given
+ <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="returnCompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <doc>
+ <![CDATA[Return the {@link Compressor} to the pool.
+
+ @param compressor the <code>Compressor</code> to be returned to the pool]]>
+ </doc>
+ </method>
+ <method name="returnDecompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <doc>
+ <![CDATA[Return the {@link Decompressor} to the pool.
+
+ @param decompressor the <code>Decompressor</code> to be returned to the
+ pool]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A global compressor/decompressor pool used to save and reuse
+ (possibly native) compression/decompression codecs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CodecPool -->
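+  <!-- The intended borrow/return pattern (a sketch; codec and rawOut are any
+       configured CompressionCodec and OutputStream):
+
+         Compressor compressor = CodecPool.getCompressor(codec);
+         try {
+           CompressionOutputStream out = codec.createOutputStream(rawOut, compressor);
+           // ... write, then out.finish() ...
+         } finally {
+           CodecPool.returnCompressor(compressor); // recycle possibly-native state
+         }
+  -->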
+ <!-- start interface org.apache.hadoop.io.compress.CompressionCodec -->
+ <interface name="CompressionCodec" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream}.
+
+ @param out the location for the final output stream
+ @return a stream to which the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream} with the given {@link Compressor}.
+
+ @param out the location for the final output stream
+ @param compressor compressor to use
+ @return a stream to which the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
+
+ @return the type of compressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Compressor} for use by this {@link CompressionCodec}.
+
+ @return a new compressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a stream decompressor that will read from the given input stream.
+
+ @param in the stream to read compressed bytes from
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionInputStream} that will read from the given
+ {@link InputStream} with the given {@link Decompressor}.
+
+ @param in the stream to read compressed bytes from
+ @param decompressor decompressor to use
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
+
+ @return the type of decompressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
+
+ @return a new decompressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a streaming compression/decompression pair.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.CompressionCodec -->
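+  <!-- One full pass through the interface (a sketch using GzipCodec; conf, data,
+       rawOut and rawIn are assumed, and ReflectionUtils.newInstance is used so the
+       Configuration is injected into the codec):
+
+         CompressionCodec codec =
+             ReflectionUtils.newInstance(GzipCodec.class, conf);
+         CompressionOutputStream out = codec.createOutputStream(rawOut);
+         out.write(data, 0, data.length);
+         out.finish();                            // write the trailer, keep rawOut open
+         CompressionInputStream in = codec.createInputStream(rawIn);
+         int n = in.read(data, 0, data.length);   // reads uncompressed bytes
+  -->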
+ <!-- start class org.apache.hadoop.io.compress.CompressionCodecFactory -->
+ <class name="CompressionCodecFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionCodecFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Find the codecs specified in the config value io.compression.codecs
+ and register them. Defaults to gzip and the DEFLATE-based default codec.]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print the extension map out as a string.]]>
+ </doc>
+ </method>
+ <method name="getCodecClasses" return="java.util.List&lt;java.lang.Class&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the list of codecs listed in the configuration
+ @param conf the configuration to look in
+ @return a list of the codec classes or null if the attribute
+ was not set]]>
+ </doc>
+ </method>
+ <method name="setCodecClasses"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="classes" type="java.util.List&lt;java.lang.Class&gt;"/>
+ <doc>
+ <![CDATA[Sets a list of codec classes in the configuration.
+ @param conf the configuration to modify
+ @param classes the list of classes to set]]>
+ </doc>
+ </method>
+ <method name="getCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Find the relevant compression codec for the given file based on its
+ filename suffix.
+ @param file the filename to check
+ @return the codec object]]>
+ </doc>
+ </method>
+ <method name="removeSuffix" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Removes a suffix from a filename, if present.
+ @param filename the filename to strip
+ @param suffix the suffix to remove
+ @return the shortened filename]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[A little test program.
+ @param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A factory that will find the correct codec for a given filename.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionCodecFactory -->
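+  <!-- Suffix-based lookup (a sketch; fs and path are a FileSystem and Path, and
+       the ".gz" name is illustrative):
+
+         CompressionCodecFactory factory = new CompressionCodecFactory(conf);
+         CompressionCodec codec = factory.getCodec(new Path("logs/part-00000.gz"));
+         InputStream in = (codec == null)
+             ? fs.open(path)                         // unknown suffix: read raw bytes
+             : codec.createInputStream(fs.open(path));
+  -->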
+ <!-- start class org.apache.hadoop.io.compress.CompressionInputStream -->
+ <class name="CompressionInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Create a compression input stream that reads
+ compressed bytes from the given stream and presents them decompressed.
+
+ @param in The input stream to read compressed bytes from.]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read bytes from the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the decompressor to its initial state and discard any buffered data,
+ as the underlying stream may have been repositioned.]]>
+ </doc>
+ </method>
+ <field name="in" type="java.io.InputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The underlying input stream of compressed bytes.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression input stream.
+
+ <p>Implementations are assumed to be buffered. This permits clients to
+ reposition the underlying input stream then call {@link #resetState()},
+ without having to also synchronize client buffers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <class name="CompressionOutputStream" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a compression output stream that writes
+ the compressed bytes to the given stream.
+ @param out the output stream to write the compressed bytes to]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finishes writing compressed data to the output stream
+ without closing the underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the compression to the initial state.
+ Does not reset the underlying stream.]]>
+ </doc>
+ </method>
+ <field name="out" type="java.io.OutputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The underlying output stream that receives the compressed bytes.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression output stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <!-- start interface org.apache.hadoop.io.compress.Compressor -->
+ <interface name="Compressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for compression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets preset dictionary for compression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of uncompressed bytes input so far.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of compressed bytes output so far.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[When called, indicates that compression should end
+ with the current contents of the input buffer.]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the compressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with compressed data. Returns actual number
+ of bytes of compressed data. A return value of 0 indicates that
+ needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the compressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of compressed data.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets compressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the compressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'compressor' which can be
+ plugged into a {@link CompressionOutputStream} to compress data.
+ This is modelled after {@link java.util.zip.Deflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Compressor -->
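+  <!-- The canonical drive loop for a Compressor, mirroring java.util.zip.Deflater
+       (a sketch; data and out are assumed, and the buffer size is arbitrary):
+
+         compressor.setInput(data, 0, data.length);
+         compressor.finish();                       // no more input will arrive
+         byte[] buf = new byte[64 * 1024];
+         while (!compressor.finished()) {
+           int n = compressor.compress(buf, 0, buf.length);
+           out.write(buf, 0, n);                    // drain until finished() turns true
+         }
+  -->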
+ <!-- start interface org.apache.hadoop.io.compress.Decompressor -->
+ <interface name="Decompressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for decompression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+      <![CDATA[Sets preset dictionary for decompression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns <code>true</code> if a preset dictionary is needed for decompression.
+ @return <code>true</code> if a preset dictionary is needed for decompression]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns true if the end of the compressed
+ data stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with uncompressed data. Returns actual number
+ of bytes of uncompressed data. A return value of 0 indicates that
+ #needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the uncompressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of uncompressed data.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets decompressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the decompressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Specification of a stream-based 'de-compressor' which can be
+ plugged into a {@link CompressionInputStream} to decompress data.
+ This is modelled after {@link java.util.zip.Inflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Decompressor -->
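+  <!-- The mirror-image loop for a Decompressor (a sketch; compressed holds one
+       complete compressed block and out is any OutputStream):
+
+         decompressor.setInput(compressed, 0, compressed.length);
+         byte[] buf = new byte[64 * 1024];
+         while (!decompressor.finished()) {
+           int n = decompressor.decompress(buf, 0, buf.length);
+           out.write(buf, 0, n);  // a return of 0 asks for more input via setInput
+         }
+  -->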
+ <!-- start class org.apache.hadoop.io.compress.DefaultCodec -->
+ <class name="DefaultCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="DefaultCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.DefaultCodec -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec -->
+ <class name="GzipCodec" extends="org.apache.hadoop.io.compress.DefaultCodec"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class creates gzip compressors/decompressors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <class name="GzipCodec.GzipInputStream" extends="org.apache.hadoop.io.compress.DecompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipInputStream" type="org.apache.hadoop.io.compress.DecompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow subclasses to directly set the inflater stream.]]>
+ </doc>
+ </constructor>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <class name="GzipCodec.GzipOutputStream" extends="org.apache.hadoop.io.compress.CompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipOutputStream" type="org.apache.hadoop.io.compress.CompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Allow subclasses to supply a different stream type here.
+ @param out the deflater stream to use]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A bridge that wraps around a DeflaterOutputStream to make it
+ a CompressionOutputStream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <!-- start class org.apache.hadoop.io.compress.LzoCodec -->
+ <class name="LzoCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="LzoCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if native-lzo library is loaded & initialized.
+
+ @param conf configuration
+ @return <code>true</code> if native-lzo library is loaded & initialized;
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link org.apache.hadoop.io.compress.CompressionCodec} for a streaming
+ <b>lzo</b> compression/decompression pair.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzoCodec -->
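+  <!-- Guarding LZO use on the native library (a sketch; falls back to DefaultCodec,
+       which needs no extra native bindings):
+
+         CompressionCodec codec = LzoCodec.isNativeLzoLoaded(conf)
+             ? ReflectionUtils.newInstance(LzoCodec.class, conf)
+             : ReflectionUtils.newInstance(DefaultCodec.class, conf);
+  -->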
+</package>
+<package name="org.apache.hadoop.io.compress.lzo">
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
+ <class name="LzoCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="LzoCompressor" type="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified {@link CompressionStrategy}.
+
+ @param strategy lzo compression algorithm to use
+ @param directBufferSize size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default lzo1x_1 compression.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo compressors are loaded and initialized.
+
+ @return <code>true</code> if lzo compressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of bytes given to this compressor since last reset.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of bytes consumed by callers of compress since last reset.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Noop.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <class name="LzoCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression algorithm for the lzo library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
+ <class name="LzoDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="LzoDecompressor" type="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.
+
+ @param strategy lzo decompression algorithm
+ @param directBufferSize size of the direct-buffer]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo decompressors are loaded and initialized.
+
+ @return <code>true</code> if lzo decompressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
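+ <!-- A matching decompression sketch for the Decompressor methods above,
+      structured like a java.util.zip.Inflater loop; the buffer size is
+      illustrative.
+
+      import java.io.ByteArrayOutputStream;
+      import java.io.IOException;
+      import org.apache.hadoop.io.compress.Decompressor;
+      import org.apache.hadoop.io.compress.lzo.LzoDecompressor;
+
+      public class LzoDecompressExample {
+        public static byte[] decompress(byte[] compressed) throws IOException {
+          Decompressor decompressor = new LzoDecompressor();
+          ByteArrayOutputStream out = new ByteArrayOutputStream();
+          byte[] buf = new byte[64 * 1024];
+          decompressor.setInput(compressed, 0, compressed.length);
+          while (!decompressor.finished()) {
+            int n = decompressor.decompress(buf, 0, buf.length);
+            if (n == 0 && decompressor.needsInput()) {
+              break;                       // all supplied input consumed
+            }
+            out.write(buf, 0, n);
+          }
+          decompressor.end();
+          return out.toByteArray();
+        }
+      }
+ -->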
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+ <class name="LzoDecompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+</package>
+<package name="org.apache.hadoop.io.compress.zlib">
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <class name="BuiltInZlibDeflater" extends="java.util.zip.Deflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="BuiltInZlibDeflater" type="int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Deflater to make it conform
+ to the org.apache.hadoop.io.compress.Compressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <class name="BuiltInZlibInflater" extends="java.util.zip.Inflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="BuiltInZlibInflater" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibInflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Inflater to make it conform
+ to the org.apache.hadoop.io.compress.Decompressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
+ <class name="ZlibCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="ZlibCompressor" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified compression level.
+ Compressed data will be generated in ZLIB format.
+
+ @param level Compression level {@link CompressionLevel}
+ @param strategy Compression strategy {@link CompressionStrategy}
+ @param header Compression header {@link CompressionHeader}
+ @param directBufferSize Size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default compression level.
+ Compressed data will be generated in ZLIB format.]]>
+ </doc>
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
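+ <!-- A sketch of the four-argument constructor documented above. The enum
+      constants named here (BEST_COMPRESSION, DEFAULT_STRATEGY, GZIP_FORMAT)
+      are assumptions for illustration; see the CompressionLevel,
+      CompressionStrategy and CompressionHeader enums that follow for the
+      exact names.
+
+      import org.apache.hadoop.io.compress.Compressor;
+      import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
+
+      Compressor gzipStyle = new ZlibCompressor(
+          ZlibCompressor.CompressionLevel.BEST_COMPRESSION,    // assumed name
+          ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY, // assumed name
+          ZlibCompressor.CompressionHeader.GZIP_FORMAT,        // assumed name
+          64 * 1024);                                // direct-buffer size
+
+      The feed loop is then the same setInput/finish/compress pattern shown
+      for LzoCompressor above.
+ -->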
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <class name="ZlibCompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The type of header for compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <class name="ZlibCompressor.CompressionLevel" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression level for the zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <class name="ZlibCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression strategy for the zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <class name="ZlibDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="ZlibDecompressor" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new decompressor.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <class name="ZlibDecompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The headers to detect from compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
+ <class name="ZlibFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ZlibFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeZlibLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if native-zlib code is loaded & initialized correctly and
+ can be used for this job.
+
+ @param conf configuration
+ @return <code>true</code> if native-zlib is loaded & initialized
+ and can be used for this job, else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of factories to create the right
+ zlib/gzip compressor/decompressor instances.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
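+ <!-- A short sketch of the factory methods above: the returned instances
+      are backed by native zlib when it loaded successfully, otherwise by
+      the java.util.zip-based BuiltInZlib classes documented earlier.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.compress.Compressor;
+      import org.apache.hadoop.io.compress.Decompressor;
+      import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+
+      Configuration conf = new Configuration();
+      boolean nativeZlib = ZlibFactory.isNativeZlibLoaded(conf);
+      Compressor compressor = ZlibFactory.getZlibCompressor(conf);
+      Decompressor decompressor = ZlibFactory.getZlibDecompressor(conf);
+ -->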
+</package>
+<package name="org.apache.hadoop.io.retry">
+ <!-- start class org.apache.hadoop.io.retry.RetryPolicies -->
+ <class name="RetryPolicies" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryPolicies"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="retryUpToMaximumCountWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumTimeWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxTime" type="long"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying for a maximum time, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumCountWithProportionalSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by the number of tries so far.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="exponentialBackoffRetry" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by a random
+ number in the range [0, 2^(number of retries))
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByRemoteException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ A retry policy for RemoteException.
+ Sets a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <field name="TRY_ONCE_THEN_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail by re-throwing the exception.
+ This corresponds to having no retry mechanism in place.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="TRY_ONCE_DONT_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail silently for <code>void</code> methods, or by
+ re-throwing the exception for non-<code>void</code> methods.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="RETRY_FOREVER" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Keep trying forever.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A collection of useful implementations of {@link RetryPolicy}.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryPolicies -->
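+ <!-- A sketch composing the policies above: a fixed-sleep policy for
+      connection failures, falling back to TRY_ONCE_THEN_FAIL for every
+      other exception. The choice of ConnectException is illustrative.
+
+      import java.net.ConnectException;
+      import java.util.HashMap;
+      import java.util.Map;
+      import java.util.concurrent.TimeUnit;
+      import org.apache.hadoop.io.retry.RetryPolicies;
+      import org.apache.hadoop.io.retry.RetryPolicy;
+
+      // Up to 5 attempts, sleeping 2 seconds between them.
+      RetryPolicy fixed =
+          RetryPolicies.retryUpToMaximumCountWithFixedSleep(5, 2, TimeUnit.SECONDS);
+
+      Map<Class<? extends Exception>, RetryPolicy> byException =
+          new HashMap<Class<? extends Exception>, RetryPolicy>();
+      byException.put(ConnectException.class, fixed);
+      RetryPolicy policy = RetryPolicies.retryByException(
+          RetryPolicies.TRY_ONCE_THEN_FAIL, byException);
+ -->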
+ <!-- start interface org.apache.hadoop.io.retry.RetryPolicy -->
+ <interface name="RetryPolicy" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="shouldRetry" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Exception"/>
+ <param name="retries" type="int"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[<p>
+ Determines whether the framework should retry a method, given the
+ exception that caused it to fail and the number of retries already
+ attempted for that operation.
+ </p>
+ @param e The exception that caused the method to fail.
+ @param retries The number of times the method has been retried.
+ @return <code>true</code> if the method should be retried,
+ <code>false</code> if the method should not be retried
+ but shouldn't fail with an exception (only for void methods).
+ @throws Exception The re-thrown exception <code>e</code> indicating
+ that the method failed and should not be retried further.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Specifies a policy for retrying method failures.
+ Implementations of this interface should be immutable.
+ </p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.retry.RetryPolicy -->
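+ <!-- A hypothetical implementation of the contract above: an immutable
+      policy that retries a bounded number of times and then fails by
+      re-throwing the exception.
+
+      import org.apache.hadoop.io.retry.RetryPolicy;
+
+      public class LimitedRetry implements RetryPolicy {
+        private final int limit;
+        public LimitedRetry(int limit) { this.limit = limit; }
+        public boolean shouldRetry(Exception e, int retries) throws Exception {
+          if (retries < limit) {
+            return true;   // ask the framework to retry the method
+          }
+          throw e;         // give up: re-throw the original exception
+        }
+      }
+ -->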
+ <!-- start class org.apache.hadoop.io.retry.RetryProxy -->
+ <class name="RetryProxy" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryProxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using the same retry policy for each method in the interface.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param retryPolicy the policy for retrying method call failures
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="methodNameToPolicyMap" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using a set of retry policies specified by method name.
+ If no retry policy is defined for a method then a default of
+ {@link RetryPolicies#TRY_ONCE_THEN_FAIL} is used.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param methodNameToPolicyMap a map of method names to retry policies
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for creating retry proxies.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryProxy -->
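+ <!-- A sketch of the single-policy create() above. FileOperations and
+      fileOpsImpl are hypothetical application types, not part of Hadoop.
+
+      import java.util.concurrent.TimeUnit;
+      import org.apache.hadoop.io.retry.RetryPolicies;
+      import org.apache.hadoop.io.retry.RetryProxy;
+
+      FileOperations retried = (FileOperations) RetryProxy.create(
+          FileOperations.class,
+          fileOpsImpl,
+          RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+              3, 1, TimeUnit.SECONDS));
+      // Method calls on retried are transparently retried under the policy.
+ -->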
+</package>
+<package name="org.apache.hadoop.io.serializer">
+ <!-- start interface org.apache.hadoop.io.serializer.Deserializer -->
+ <interface name="Deserializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the deserializer for reading.</p>]]>
+ </doc>
+ </method>
+ <method name="deserialize" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ Deserialize the next object from the underlying input stream.
+ If the object <code>t</code> is non-null then this deserializer
+ <i>may</i> set its internal state to the next object read from the input
+ stream. Otherwise, if the object <code>t</code> is null a new
+ deserialized object will be created.
+ </p>
+ @return the deserialized object]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying input stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for deserializing objects of type <T> from an
+ {@link InputStream}.
+ </p>
+
+ <p>
+ Deserializers are stateful, but must not buffer the input since
+ other consumers may read from the input between calls to
+ {@link #deserialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Deserializer -->
+ <!-- start class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <class name="DeserializerComparator" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator&lt;T&gt;"/>
+ <constructor name="DeserializerComparator" type="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link Deserializer} to deserialize
+ the objects to be compared so that the standard {@link Comparator} can
+ be used to compare them.
+ </p>
+ <p>
+ One may optimize compare-intensive operations by using a custom
+ implementation of {@link RawComparator} that operates directly
+ on byte representations.
+ </p>
+ @param <T>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <class name="JavaSerialization" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;java.io.Serializable&gt;"/>
+ <constructor name="JavaSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ An experimental {@link Serialization} for Java {@link Serializable} classes.
+ </p>
+ @see JavaSerializationComparator]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <class name="JavaSerializationComparator" extends="org.apache.hadoop.io.serializer.DeserializerComparator&lt;T&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JavaSerializationComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o1" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ <param name="o2" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link JavaSerialization}
+ {@link Deserializer} to deserialize objects that are then compared via
+ their {@link Comparable} interfaces.
+ </p>
+ @param <T>
+ @see JavaSerialization]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <!-- start interface org.apache.hadoop.io.serializer.Serialization -->
+ <interface name="Serialization" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Allows clients to test whether this {@link Serialization}
+ supports the given class.]]>
+ </doc>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Serializer} for the given class.]]>
+ </doc>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Deserializer} for the given class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Encapsulates a {@link Serializer}/{@link Deserializer} pair.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serialization -->
+ <!-- start class org.apache.hadoop.io.serializer.SerializationFactory -->
+ <class name="SerializationFactory" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SerializationFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Serializations are found by reading the <code>io.serializations</code>
+ property from <code>conf</code>, which is a comma-delimited list of
+ classnames.
+ </p>]]>
+ </doc>
+ </constructor>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getSerialization" return="org.apache.hadoop.io.serializer.Serialization&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for {@link Serialization}s.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.SerializationFactory -->
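+ <!-- A round-trip sketch using the factory above with the Writable-backed
+      serialization; Text is used purely as an example class.
+
+      import java.io.ByteArrayInputStream;
+      import java.io.ByteArrayOutputStream;
+      import java.io.IOException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.io.serializer.Deserializer;
+      import org.apache.hadoop.io.serializer.SerializationFactory;
+      import org.apache.hadoop.io.serializer.Serializer;
+
+      public class RoundTrip {
+        public static Text roundTrip(Text value) throws IOException {
+          SerializationFactory factory =
+              new SerializationFactory(new Configuration());
+
+          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+          Serializer<Text> serializer = factory.getSerializer(Text.class);
+          serializer.open(bytes);
+          serializer.serialize(value);
+          serializer.close();
+
+          Deserializer<Text> deserializer = factory.getDeserializer(Text.class);
+          deserializer.open(new ByteArrayInputStream(bytes.toByteArray()));
+          Text copy = deserializer.deserialize(null); // null: allocate anew
+          deserializer.close();
+          return copy;
+        }
+      }
+ -->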
+ <!-- start interface org.apache.hadoop.io.serializer.Serializer -->
+ <interface name="Serializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the serializer for writing.</p>]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Serialize <code>t</code> to the underlying output stream.</p>]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying output stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for serializing objects of type <T> to an
+ {@link OutputStream}.
+ </p>
+
+ <p>
+ Serializers are stateful, but must not buffer the output since
+ other producers may write to the output between calls to
+ {@link #serialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serializer -->
+ <!-- start class org.apache.hadoop.io.serializer.WritableSerialization -->
+ <class name="WritableSerialization" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="WritableSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Serialization} for {@link Writable}s that delegates to
+ {@link Writable#write(java.io.DataOutput)} and
+ {@link Writable#readFields(java.io.DataInput)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.WritableSerialization -->
+</package>
+<package name="org.apache.hadoop.ipc">
+ <!-- start class org.apache.hadoop.ipc.Client -->
+ <class name="Client" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Client" type="java.lang.Class, org.apache.hadoop.conf.Configuration, javax.net.SocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client whose values are of the given {@link Writable}
+ class.]]>
+ </doc>
+ </constructor>
+ <constructor name="Client" type="java.lang.Class&lt;?&gt;, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client with the default SocketFactory
+ @param valueClass
+ @param conf]]>
+ </doc>
+ </constructor>
+ <method name="setPingInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="pingInterval" type="int"/>
+ <doc>
+ <![CDATA[Set the ping interval value in the configuration
+
+ @param conf Configuration
+ @param pingInterval the ping interval]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all threads related to this client. No further calls may be made
+ using this client.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a call, passing <code>param</code>, to the IPC server running at
+ <code>address</code>, returning the value. Throws exceptions if there are
+ network problems or if the remote code threw an exception.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="params" type="org.apache.hadoop.io.Writable[]"/>
+ <param name="addresses" type="java.net.InetSocketAddress[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Makes a set of calls in parallel. Each parameter is sent to the
+ corresponding address. When all values are available, or have timed out
+ or errored, the collected results are returned in an array. The array
+ contains nulls for calls that timed out or errored.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A client for an IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Server]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Client -->
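+ <!-- A sketch of a single call with the client above. The host, port and
+      use of Text as the value class are illustrative.
+
+      import java.net.InetSocketAddress;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.io.Writable;
+      import org.apache.hadoop.ipc.Client;
+
+      public static Writable ping(InetSocketAddress addr) throws Exception {
+        Client client = new Client(Text.class, new Configuration());
+        try {
+          return client.call(new Text("ping"), addr);
+        } finally {
+          client.stop();   // no further calls may be made with this client
+        }
+      }
+ -->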
+ <!-- start class org.apache.hadoop.ipc.RemoteException -->
+ <class name="RemoteException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RemoteException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lookupTypes" type="java.lang.Class[]"/>
+ <doc>
+ <![CDATA[If this remote exception wraps one of the lookupTypes,
+ instantiate and return that exception.
+ <p>
+ Unwraps any IOException.
+
+ @param lookupTypes the desired exception classes.
+ @return an IOException: either the unwrapped lookup-class exception or this.]]>
+ </doc>
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Instantiate and return the exception wrapped up by this remote exception.
+
+ <p> This unwraps any <code>Throwable</code> that has a constructor taking
+ a <code>String</code> as a parameter.
+ Otherwise it returns this.
+
+ @return <code>Throwable</code>]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RemoteException -->
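+ <!-- A sketch of the class-specific unwrapping above. The namenode proxy
+      and its someCall() method are hypothetical; the choice of
+      FileNotFoundException is illustrative.
+
+      import java.io.FileNotFoundException;
+      import org.apache.hadoop.ipc.RemoteException;
+
+      try {
+        namenode.someCall();               // hypothetical IPC call
+      } catch (RemoteException re) {
+        // Rethrow the server-side FileNotFoundException if that is what
+        // was wrapped; otherwise rethrow the RemoteException itself.
+        throw re.unwrapRemoteException(FileNotFoundException.class);
+      }
+ -->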
+ <!-- start class org.apache.hadoop.ipc.RPC -->
+ <class name="RPC" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="waitForProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object with the default SocketFactory
+
+ @param protocol
+ @param clientVersion
+ @param addr
+ @param conf
+ @return a proxy instance
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopProxy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="proxy" type="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <doc>
+ <![CDATA[Stop this proxy and release its invoker's resources
+ @param proxy the proxy to be stopped]]>
+ </doc>
+ </method>
+ <method name="call" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="method" type="java.lang.reflect.Method"/>
+ <param name="params" type="java.lang.Object[][]"/>
+ <param name="addrs" type="java.net.InetSocketAddress[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Expert: Make multiple, parallel calls to a set of servers.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="numHandlers" type="int"/>
+ <param name="verbose" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple RPC mechanism.
+
+ A <i>protocol</i> is a Java interface. All parameters and return types must
+ be one of:
+
+ <ul> <li>a primitive type, <code>boolean</code>, <code>byte</code>,
+ <code>char</code>, <code>short</code>, <code>int</code>, <code>long</code>,
+ <code>float</code>, <code>double</code>, or <code>void</code>; or</li>
+
+ <li>a {@link String}; or</li>
+
+ <li>a {@link Writable}; or</li>
+
+ <li>an array of the above types</li> </ul>
+
+ All methods in the protocol should throw only IOException. No field data of
+ the protocol instance is transmitted.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC -->
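+ <!-- A client/server sketch of the mechanism above. PingProtocol and
+      PingProtocolImpl are hypothetical; the versionID convention and
+      server.start() are assumptions based on common Hadoop usage rather
+      than methods documented in this file.
+
+      import java.io.IOException;
+      import java.net.InetSocketAddress;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.ipc.RPC;
+      import org.apache.hadoop.ipc.VersionedProtocol;
+
+      // A protocol is a Java interface whose parameter and return types
+      // obey the rules above (primitives, String, Writable, or arrays).
+      interface PingProtocol extends VersionedProtocol {
+        long versionID = 1L;
+        String ping(String message) throws IOException;
+      }
+
+      Configuration conf = new Configuration();
+
+      // Server side (PingProtocolImpl implements PingProtocol):
+      RPC.Server server =
+          RPC.getServer(new PingProtocolImpl(), "0.0.0.0", 9000, conf);
+      server.start();
+
+      // Client side: the proxy speaks the protocol over IPC.
+      PingProtocol proxy = (PingProtocol) RPC.getProxy(
+          PingProtocol.class, PingProtocol.versionID,
+          new InetSocketAddress("server.example", 9000), conf);
+      String reply = proxy.ping("hello");
+      RPC.stopProxy((VersionedProtocol) proxy);
+ -->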
+ <!-- start class org.apache.hadoop.ipc.RPC.Server -->
+ <class name="RPC.Server" extends="org.apache.hadoop.ipc.Server"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on]]>
+ </doc>
+ </constructor>
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on
+ @param numHandlers the number of method handler threads to run
+ @param verbose whether each call should be logged]]>
+ </doc>
+ </constructor>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receivedTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An RPC Server.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.Server -->
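+  <!-- Illustrative sketch: publishing an implementation object through
+       RPC.getServer(...) as documented above. PingProtocol is the hypothetical
+       interface from the previous sketch; the (instance, bindAddress, port,
+       conf) argument order follows the getServer entry in this section.
+
+       PingProtocol impl = new PingProtocol() {
+         public String ping(String msg) { return "pong: " + msg; }
+         public long getProtocolVersion(String protocol, long clientVersion) {
+           return versionID;
+         }
+       };
+       RPC.Server server = RPC.getServer(impl, "0.0.0.0", 9000, conf);
+       server.start(); // must be called before any calls are handled
+       server.join();  // wait until stop() is invoked elsewhere
+  -->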
+ <!-- start class org.apache.hadoop.ipc.RPC.VersionMismatch -->
+ <class name="RPC.VersionMismatch" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.VersionMismatch" type="java.lang.String, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a version mismatch exception
+      @param interfaceName the name of the mismatched protocol
+ @param clientVersion the client's version of the protocol
+ @param serverVersion the server's version of the protocol]]>
+ </doc>
+ </constructor>
+ <method name="getInterfaceName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the interface name
+ @return the java class name
+ (eg. org.apache.hadoop.mapred.InterTrackerProtocol)]]>
+ </doc>
+ </method>
+ <method name="getClientVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the client's preferred version]]>
+ </doc>
+ </method>
+ <method name="getServerVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the version the server agreed to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A version mismatch for the RPC protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.VersionMismatch -->
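+  <!-- Illustrative sketch: RPC.VersionMismatch extends IOException, so a
+       client can catch it around proxy creation and report both versions via
+       the getters above. Hedged: whether getProxy itself performs the version
+       check is an assumption here; proxy/addr/conf reuse the earlier sketch.
+
+       try {
+         proxy = (PingProtocol)
+             RPC.getProxy(PingProtocol.class, PingProtocol.versionID, addr, conf);
+       } catch (RPC.VersionMismatch e) {
+         System.err.println(e.getInterfaceName() + ": client version "
+             + e.getClientVersion() + ", server version " + e.getServerVersion());
+       }
+  -->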
+ <!-- start class org.apache.hadoop.ipc.Server -->
+ <class name="Server" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class, int, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class&lt;?&gt;, int, org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a server listening on the named port and address. Parameters passed must
+      be of the named class. The <code>handlerCount</code> determines
+ the number of handler threads that will be used to process calls.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.ipc.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the server instance the current call is executing under,
+      or null if not inside an RPC. May be called under
+      {@link #call(Writable, long)} implementations, and under {@link Writable}
+      methods of parameters and return values. Permits applications to access
+ the server context.]]>
+ </doc>
+ </method>
+ <method name="getRemoteIp" return="java.net.InetAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the remote side's IP address when invoked inside an RPC.
+      Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="getRemoteAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns remote address as a string when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="bind"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.ServerSocket"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <param name="backlog" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A convenience method to bind to a given address and report
+ better exceptions if the address is not a valid host.
+ @param socket the socket to bind
+ @param address the address to bind to
+ @param backlog the number of connections allowed in the queue
+ @throws BindException if the address can't be bound
+ @throws UnknownHostException if the address isn't a valid host name
+ @throws IOException other random errors from bind]]>
+ </doc>
+ </method>
+ <method name="setTimeout"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[No longer used.]]>
+ </doc>
+ </method>
+ <method name="setSocketSendBufSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Sets the socket buffer size used for responding to RPCs]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts the service. Must be called before any calls will be handled.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops the service. No new calls will be handled after this is called.]]>
+ </doc>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Wait for the server to be stopped.
+ Does not wait for all subthreads to finish.
+ See {@link #stop()}.]]>
+ </doc>
+ </method>
+ <method name="getListenerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return the socket (IP + port) on which the RPC server is listening.
+      @return the socket (IP + port) on which the RPC server is listening.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receiveTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called for each call.]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The number of open RPC connections.
+      @return the number of open RPC connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <field name="HEADER" type="java.nio.ByteBuffer"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The first four bytes of Hadoop RPC connections]]>
+ </doc>
+ </field>
+ <field name="CURRENT_VERSION" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rpcMetrics" type="org.apache.hadoop.ipc.metrics.RpcMetrics"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Client]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Server -->
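+  <!-- Illustrative sketch: the smallest concrete subclass of the abstract
+       Server, wiring the protected (bindAddress, port, paramClass,
+       handlerCount, conf) constructor and the abstract call(Writable, long)
+       method documented above. EchoServer is a hypothetical name.
+
+       import java.io.IOException;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.io.Text;
+       import org.apache.hadoop.io.Writable;
+       import org.apache.hadoop.ipc.Server;
+
+       public class EchoServer extends Server {
+         public EchoServer(Configuration conf) throws IOException {
+           super("0.0.0.0", 9001, Text.class, 2, conf); // 2 handler threads
+         }
+         public Writable call(Writable param, long receiveTime) throws IOException {
+           return param; // echo each request back as the response
+         }
+       }
+  -->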
+ <!-- start interface org.apache.hadoop.ipc.VersionedProtocol -->
+ <interface name="VersionedProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return protocol version corresponding to protocol interface.
+ @param protocol The classname of the protocol interface
+ @param clientVersion The version of the protocol that the client speaks
+ @return the version that the server will speak]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Superinterface of all protocols that use Hadoop RPC.
+      Subinterfaces are also expected to declare
+      a static final long versionID field.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.VersionedProtocol -->
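+  <!-- Illustrative sketch: a getProtocolVersion implementation that enforces
+       the version handshake itself and reports a mismatch through
+       RPC.VersionMismatch; a minimal variant simply returns its own versionID.
+
+       public long getProtocolVersion(String protocol, long clientVersion)
+           throws IOException {
+         if (clientVersion != versionID) {
+           throw new RPC.VersionMismatch(protocol, clientVersion, versionID);
+         }
+         return versionID;
+       }
+  -->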
+</package>
+<package name="org.apache.hadoop.ipc.metrics">
+ <!-- start class org.apache.hadoop.ipc.metrics.RpcMetrics -->
+ <class name="RpcMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="RpcMetrics" type="java.lang.String, java.lang.String, org.apache.hadoop.ipc.Server"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+      <![CDATA[Push the metrics to the monitoring subsystem on each doUpdates() call.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="rpcQueueTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The metrics variables are public:
+      - they can be set directly by calling their set/inc methods
+      - they can also be read directly, e.g. JMX does this.]]>
+ </doc>
+ </field>
+ <field name="rpcProcessingTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="metricsList" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.metrics.util.MetricsTimeVaryingRate&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various RPC statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #rpcQueueTime}.inc(time)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.metrics.RpcMetrics -->
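+  <!-- Illustrative sketch: updating the public metric objects directly, as
+       the class comment above suggests. The receivedTime and processingMillis
+       variables are assumed to come from the surrounding IPC code.
+
+       long queuedMillis = System.currentTimeMillis() - receivedTime;
+       rpcMetrics.rpcQueueTime.inc(queuedMillis);
+       rpcMetrics.rpcProcessingTime.inc(processingMillis);
+  -->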
+ <!-- start interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+ <interface name="RpcMgtMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRpcOpsNumber" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of RPC Operations in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for RPC Operations in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Average RPC Operation Queued Time in the last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all min max times]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The number of open RPC connections.
+      @return the number of open RPC connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for the RPC layer.
+ Many of the statistics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+      For the statistics that are sampled and averaged, one must specify
+      a metrics context that does periodic update calls. Most do.
+      The default Null metrics context, however, does NOT. So if you aren't
+      using any other metrics context, you can turn on the viewing and averaging
+      of sampled metrics by specifying the following two lines
+      in the hadoop-metrics.properties file:
+ <pre>
+ rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ rpc.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+      The context with the update thread is used to average the data periodically.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
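+  <!-- Illustrative sketch: reading the RPC MBean attributes through the
+       platform MBeanServer. The ObjectName pattern is an assumption for
+       illustration; only the attribute name mirrors the getters above, and
+       getAttribute will fail for beans that lack it.
+
+       import java.lang.management.ManagementFactory;
+       import javax.management.MBeanServer;
+       import javax.management.ObjectName;
+
+       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+       ObjectName pattern = new ObjectName("hadoop:*"); // hypothetical domain
+       for (ObjectName name : mbs.queryNames(pattern, null)) {
+         System.out.println(name + " RpcOpsNumber="
+             + mbs.getAttribute(name, "RpcOpsNumber"));
+       }
+  -->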
+</package>
+<package name="org.apache.hadoop.log">
+ <!-- start class org.apache.hadoop.log.LogLevel -->
+ <class name="LogLevel" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[A command line implementation]]>
+ </doc>
+ </method>
+ <field name="USAGES" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[Change log level at runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel -->
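+  <!-- Illustrative sketch: driving the command line entry point directly.
+       The -setlevel flag, host:port, and logger name are assumptions here;
+       see the USAGES field for the authoritative usage string.
+
+       LogLevel.main(new String[] {
+         "-setlevel", "jobtracker.example.com:50030",
+         "org.apache.hadoop.mapred", "DEBUG"
+       });
+  -->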
+ <!-- start class org.apache.hadoop.log.LogLevel.Servlet -->
+ <class name="LogLevel.Servlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel.Servlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A servlet implementation]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel.Servlet -->
+</package>
+<package name="org.apache.hadoop.mapred">
+ <!-- start class org.apache.hadoop.mapred.ClusterStatus -->
+ <class name="ClusterStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of task trackers in the cluster.
+
+ @return the number of task trackers in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running map tasks in the cluster.
+
+ @return the number of currently running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running reduce tasks in the cluster.
+
+ @return the number of currently running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running map tasks in the cluster.
+
+ @return the maximum capacity for running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running reduce tasks in the cluster.
+
+ @return the maximum capacity for running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getJobTrackerState" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current state of the <code>JobTracker</code>,
+ as {@link JobTracker.State}
+
+ @return the current state of the <code>JobTracker</code>.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Status information on the current state of the Map-Reduce cluster.
+
+ <p><code>ClusterStatus</code> provides clients with information such as:
+ <ol>
+ <li>
+ Size of the cluster.
+ </li>
+ <li>
+ Task capacity of the cluster.
+ </li>
+ <li>
+ The number of currently running map & reduce tasks.
+ </li>
+ <li>
+ State of the <code>JobTracker</code>.
+ </li>
+ </ol></p>
+
+ <p>Clients can query for the latest <code>ClusterStatus</code>, via
+ {@link JobClient#getClusterStatus()}.</p>
+
+ @see JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ClusterStatus -->
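+  <!-- Illustrative sketch: polling cluster state through
+       JobClient#getClusterStatus(), as the class comment above points out.
+
+       JobConf job = new JobConf();
+       JobClient client = new JobClient(job);
+       ClusterStatus status = client.getClusterStatus();
+       System.out.println("trackers=" + status.getTaskTrackers()
+           + " maps=" + status.getMapTasks() + "/" + status.getMaxMapTasks()
+           + " reduces=" + status.getReduceTasks() + "/" + status.getMaxReduceTasks()
+           + " state=" + status.getJobTrackerState());
+  -->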
+ <!-- start class org.apache.hadoop.mapred.Counters -->
+ <class name="Counters" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Group&gt;"/>
+ <constructor name="Counters"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getGroupNames" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the names of all counter groups (the counter enum classes).
+      @return collection of counter group names.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Group&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getGroup" return="org.apache.hadoop.mapred.Counters.Group"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="groupName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the named counter group, or an empty group if there is none
+ with the specified name.]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Find the counter for the given enum. The same enum will always return the
+ same counter.
+ @param key the counter key
+ @return the matching counter object]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="group" type="java.lang.String"/>
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Find a counter by using strings
+ @param group the name of the group
+ @param id the id of the counter within the group (0 to N-1)
+ @param name the internal name of the counter
+ @return the counter for that name
+ @deprecated]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param key identifies a counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param group the name of the group
+ @param counter the internal name of the counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Returns current value of the specified counter, or 0 if the counter
+ does not exist.]]>
+ </doc>
+ </method>
+ <method name="incrAllCounters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+ </doc>
+ </method>
+ <method name="sum" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.mapred.Counters"/>
+ <param name="b" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Convenience method for computing the sum of two sets of counters.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of counters, by summing the number of counters
+ in each group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the set of groups.
+ The external format is:
+ #groups (groupName group)*
+
+ i.e. the number of groups followed by 0 or more groups, where each
+ group is of the form:
+
+ groupDisplayName #counters (false | true counter)*
+
+ where each counter is of the form:
+
+ name (false | true displayName) value]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a set of groups.]]>
+ </doc>
+ </method>
+ <method name="log"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Logs the current counter values.
+ @param log The log to use.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return textual representation of the counter values.]]>
+ </doc>
+ </method>
+ <method name="makeCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Convert a counters object into a single line that is easy to parse.
+      @return a string with one "name=value" entry per counter, separated by ","]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A set of named counters.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> can be of
+ any {@link Enum} type.</p>
+
+      <p><code>Counters</code> are bunched into {@link Group}s, each comprising
+      counters from a particular <code>Enum</code> class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters -->
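+  <!-- Illustrative sketch: the two ways of addressing a counter shown above,
+       by Enum and by (group, name) strings. WordStats is a hypothetical enum;
+       each enum class becomes one counter group.
+
+       enum WordStats { GOOD_LINES, BAD_LINES }
+
+       Counters counters = new Counters();
+       counters.incrCounter(WordStats.GOOD_LINES, 1);          // by enum
+       counters.incrCounter("MyGroup", "customCounter", 5);    // by strings
+       long good = counters.getCounter(WordStats.GOOD_LINES);  // 0 if absent
+  -->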
+ <!-- start class org.apache.hadoop.mapred.Counters.Counter -->
+ <class name="Counters.Counter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the internal name of the counter.
+ @return the internal name of the counter]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the counter.
+ @return the user facing name of the counter]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[What is the current value of this counter?
+ @return the current value]]>
+ </doc>
+ </method>
+ <method name="increment"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Increment this counter by the given value
+ @param incr the value to increase this counter by]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A counter record, comprising its name and value.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Counter -->
+ <!-- start class org.apache.hadoop.mapred.Counters.Group -->
+ <class name="Counters.Group" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"/>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns raw name of the group. This is the name of the enum class
+ for this group of counters.]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns localized name of the group. This is the same as getName() by
+ default, but different if an appropriate ResourceBundle is found.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="counterName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the specified counter, or 0 if the counter does
+ not exist.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getCounter(String)} instead">
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given id and create it if it doesn't exist.
+ @param id the numeric id of the counter within the group
+ @param name the internal counter name
+ @return the counter
+ @deprecated use {@link #getCounter(String)} instead]]>
+ </doc>
+ </method>
+ <method name="getCounterForName" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given name and create it if it doesn't exist.
+ @param name the internal counter name
+ @return the counter]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of counters in this group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+      <![CDATA[A <code>Group</code> of counters, comprising counters from a particular
+      counter {@link Enum} class.
+
+      <p><code>Group</code> handles localization of the class name and the
+ counter names.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Group -->
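+  <!-- Illustrative sketch: both Counters and Counters.Group declare Iterable,
+       so a full dump of the counters sketched above needs only two nested
+       loops.
+
+       for (Counters.Group group : counters) {
+         System.out.println(group.getDisplayName());
+         for (Counters.Counter counter : group) {
+           System.out.println("  " + counter.getDisplayName()
+               + " = " + counter.getCounter());
+         }
+       }
+  -->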
+ <!-- start class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <class name="DefaultJobHistoryParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DefaultJobHistoryParser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseJobTasks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobHistoryFile" type="java.lang.String"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobHistory.JobInfo"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Populates a JobInfo object from the job's history log file.
+ @param jobHistoryFile history file for this job.
+ @param job a precreated JobInfo object, should be non-null.
+ @param fs FileSystem where historyFile is present.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Default parser for job history files. It creates an object model from
+      the job history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
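+  <!-- Illustrative sketch: feeding a history log into a precreated JobInfo,
+       per the parseJobTasks entry above. The JobInfo constructor argument and
+       the history file path are assumptions for illustration.
+
+       FileSystem fs = FileSystem.get(conf);
+       JobHistory.JobInfo job = new JobHistory.JobInfo("job_200811101010_0001");
+       DefaultJobHistoryParser.parseJobTasks(
+           "/history/job_200811101010_0001_user_wordcount", job, fs);
+  -->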
+ <!-- start class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <class name="FileAlreadyExistsException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileAlreadyExistsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileAlreadyExistsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Used when target file already exists for any operation and
+ is not configured to be overwritten.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <!-- start class org.apache.hadoop.mapred.FileInputFormat -->
+ <class name="FileInputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <constructor name="FileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setMinSplitSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="minSplitSize" type="long"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="filename" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+      <![CDATA[Is the given filename splitable? Usually true, but if the file is
+      stream-compressed, it will not be.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split-up
+ so that {@link Mapper}s process entire files.
+
+ @param fs the file system that the file is on
+ @param filename the file name to check
+ @return is this file splitable?]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setInputPathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="filter" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.PathFilter&gt;"/>
+ <doc>
+ <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+
+      @param filter the PathFilter class to use for filtering the input paths.]]>
+ </doc>
+ </method>
+ <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get a PathFilter instance of the filter set for the input paths.
+
+      @return the PathFilter instance set for the job, or null if none has been set.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression.
+
+ @param job the job to list input paths for
+ @return array of FileStatus objects
+ @throws IOException if zero items.]]>
+ </doc>
+ </method>
+ <method name="listPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="Use {@link #listStatus(JobConf)} instead.">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression.
+
+ @param job the job to list input paths for
+ @return array of Path objects
+ @throws IOException if zero items.
+ @deprecated Use {@link #listStatus(JobConf)} instead.]]>
+ </doc>
+ </method>
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Splits files returned by {@link #listStatus(JobConf)} when
+ they're too big.]]>
+ </doc>
+ </method>
+ <method name="computeSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="goalSize" type="long"/>
+ <param name="minSize" type="long"/>
+ <param name="blockSize" type="long"/>
+ </method>
+ <method name="getBlockIndex" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+ <param name="offset" type="long"/>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the given comma separated paths as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be set as
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add the given comma separated paths to the list of inputs for
+ the map-reduce job.
+
+ @param conf The configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be added to
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job.
+ @param inputPaths the {@link Path}s of the input directories/files
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @param conf The configuration of the job
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class for file-based {@link InputFormat}.
+
+ <p><code>FileInputFormat</code> is the base class for all file-based
+ <code>InputFormat</code>s. This provides a generic implementation of
+ {@link #getSplits(JobConf, int)}.
+ Subclasses of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(FileSystem, Path)} method to ensure input-files are
+ not split-up and are processed as a whole by {@link Mapper}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileInputFormat -->
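+  <!-- Illustrative sketch: configuring input paths with the static helpers
+       above, plus a subclass that keeps files whole by overriding
+       isSplitable. WholeFileTextInputFormat is a hypothetical name.
+
+       JobConf conf = new JobConf();
+       FileInputFormat.setInputPaths(conf, new Path("/data/in"));
+       FileInputFormat.addInputPath(conf, new Path("/data/extra"));
+
+       public class WholeFileTextInputFormat extends TextInputFormat {
+         protected boolean isSplitable(FileSystem fs, Path file) {
+           return false; // each file is processed whole by a single Mapper
+         }
+       }
+  -->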
+ <!-- start class org.apache.hadoop.mapred.FileOutputFormat -->
+ <class name="FileOutputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="FileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param conf The configuration of the job.
+ @param outputDir the {@link Path} of the output directory for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(JobConf)]]>
+ </doc>
+ </method>
+ <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the task's temporary output directory
+      for the map-reduce job.
+
+ <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
+
+ <p>Some applications need to create/write-to side-files, which differ from
+ the actual job-outputs.
+
+ <p>In such cases there could be issues with 2 instances of the same TIP
+ (running simultaneously e.g. speculative tasks) trying to open/write-to the
+ same file (path) on HDFS. Hence the application-writer will have to pick
+ unique names per task-attempt (e.g. using the attemptid, say
+ <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
+
+ <p>To get around this the Map-Reduce framework helps the application-writer
+ out by maintaining a special
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>
+ sub-directory for each task-attempt on HDFS where the output of the
+ task-attempt goes. On successful completion of the task-attempt the files
+ in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only)
+ are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the
+ framework discards the sub-directory of unsuccessful task-attempts. This
+ is completely transparent to the application.</p>
+
+      <p>The application-writer can take advantage of this by creating any
+      side-files required in <tt>${mapred.work.output.dir}</tt> during execution
+      of a reduce-task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the
+      framework will move them out similarly; thus the writer doesn't have to pick
+      unique paths per task-attempt.</p>
+
+ <p><i>Note</i>: the value of <tt>${mapred.work.output.dir}</tt> during
+ execution of a particular task-attempt is actually
+      <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>, and this value is
+ set by the map-reduce framework. So, just create any side-files in the
+ path returned by {@link #getWorkOutputPath(JobConf)} from map/reduce
+ task to take advantage of this feature.</p>
+
+ <p>The entire discussion holds true for maps of jobs with
+ reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
+ goes directly to HDFS.</p>
+
+ @return the {@link Path} to the task's temporary output directory
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to create the task's temporary output directory and
+ return the path to the task's output file.
+
+ @param conf job-configuration
+ @param name temporary task-output filename
+ @return path to the task's temporary output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base class for file-based {@link OutputFormat} implementations.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.FileSplit -->
+ <class name="FileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[Constructs a split.
+ @deprecated
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process]]>
+ </doc>
+ </constructor>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+ </doc>
+ </constructor>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file containing this split's data.]]>
+ </doc>
+ </method>
+ <method name="getStart" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The position of the first byte in the file to process.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the file to process.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A section of an input file. Returned by {@link
+ InputFormat#getSplits(JobConf, int)} and passed to
+ {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileSplit -->
+ <!-- start class org.apache.hadoop.mapred.ID -->
+ <class name="ID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.mapred.ID&gt;"/>
+ <constructor name="ID" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an ID object from the given int.]]>
+ </doc>
+ </constructor>
+ <constructor name="ID"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the int which represents the identifier.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare IDs by associated numbers]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.ID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.ID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct an ID object from the given string.
+ 
+ @return the constructed ID object, or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A general identifier, which internally stores the id
+ as an integer. This is the super class of {@link JobID},
+ {@link TaskID} and {@link TaskAttemptID}.
+
+ @see JobID
+ @see TaskID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ID -->
+ <!-- start interface org.apache.hadoop.mapred.InputFormat -->
+ <interface name="InputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="getSplits is called in the client and can perform any
+ necessary validation of the input">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the validity of the input-specification for the job.
+ 
+ <p>This method is used to validate the input directories when a job is
+ submitted so that the {@link JobClient} can fail early, with a useful
+ error message, in case of errors, e.g. if an input directory does not
+ exist.</p>
+
+ @param job job configuration.
+ @throws InvalidInputException if the job does not have valid input
+ @deprecated getSplits is called in the client and can perform any
+ necessary validation of the input]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically split the set of input files for the job.
+
+ <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
+ for processing.</p>
+
+ <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
+ input files are not physically split into chunks. E.g. a split could
+ be an <i>&lt;input-file-path, start, offset&gt;</i> tuple.</p>
+
+ @param job job configuration.
+ @param numSplits the desired number of splits, a hint.
+ @return an array of {@link InputSplit}s for the job.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordReader} for the given {@link InputSplit}.
+
+ <p>It is the responsibility of the <code>RecordReader</code> to respect
+ record boundaries while processing the logical split to present a
+ record-oriented view to the individual task.</p>
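+ 
+ <p>A minimal sketch of how a reader is consumed (normally the framework
+ runs this loop; <tt>inputFormat</tt>, <tt>split</tt> and <tt>job</tt> are
+ assumed to be in scope, and exception handling is omitted):</p>
+ <p><blockquote><pre>
+ RecordReader&lt;K, V&gt; reader =
+   inputFormat.getRecordReader(split, job, Reporter.NULL);
+ K key = reader.createKey();
+ V value = reader.createValue();
+ while (reader.next(key, value)) {
+   // hand (key, value) to the Mapper
+ }
+ reader.close();
+ </pre></blockquote></p>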
+
+ @param split the {@link InputSplit}
+ @param job the job that this split belongs to
+ @return a {@link RecordReader}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputFormat</code> describes the input-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the input-specification of the job.
+ </li>
+ <li>
+ Split-up the input file(s) into logical {@link InputSplit}s, each of
+ which is then assigned to an individual {@link Mapper}.
+ </li>
+ <li>
+ Provide the {@link RecordReader} implementation to be used to glean
+ input records from the logical <code>InputSplit</code> for processing by
+ the {@link Mapper}.
+ </li>
+ </ol>
+
+ <p>The default behavior of file-based {@link InputFormat}s, typically
+ sub-classes of {@link FileInputFormat}, is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of the input files. However, the {@link FileSystem} blocksize of
+ the input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
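+ 
+ <p>For example, a sketch of raising that lower bound so each split covers
+ at least 128MB, using the property named above:</p>
+ <p><blockquote><pre>
+ // Splits may now span multiple blocks instead of one block each.
+ job.setLong("mapred.min.split.size", 128 * 1024 * 1024);
+ </pre></blockquote></p>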
+
+ <p>Clearly, logical splits based on input-size are insufficient for many
+ applications since record boundaries must be respected. In such cases, the
+ application also has to implement a {@link RecordReader}, which has the
+ responsibility to respect record-boundaries and present a record-oriented
+ view of the logical <code>InputSplit</code> to the individual task.</p>
+
+ @see InputSplit
+ @see RecordReader
+ @see JobClient
+ @see FileInputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.InputSplit -->
+ <interface name="InputSplit" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the total number of bytes in the data of the <code>InputSplit</code>.
+
+ @return the number of bytes in the input split.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hostnames where the input split is located.
+
+ @return list of hostnames where data of the <code>InputSplit</code> is
+ located as an array of <code>String</code>s.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputSplit</code> represents the data to be processed by an
+ individual {@link Mapper}.
+
+ <p>Typically, it presents a byte-oriented view on the input and is the
+ responsibility of {@link RecordReader} of the job to process this and present
+ a record-oriented view.
+
+ @see InputFormat
+ @see RecordReader]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputSplit -->
+ <!-- start class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <class name="InvalidFileTypeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidFileTypeException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidFileTypeException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Used when the file type differs from the desired file type,
+ e.g. when a file is found where a directory was expected, or vice versa.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidInputException -->
+ <class name="InvalidInputException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidInputException" type="java.util.List&lt;java.io.IOException&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create the exception with the given list.
+ @param probs the list of problems to report; this list is not copied.]]>
+ </doc>
+ </constructor>
+ <method name="getProblems" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete list of the problems reported.
+ @return the list of problems, which must not be modified]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get a summary message of the problems found.
+ @return the concatenated messages from all of the problems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class wraps a list of problems with the input, so that the user
+ can get a list of problems together instead of finding and fixing them one
+ by one.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidInputException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <class name="InvalidJobConfException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidJobConfException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidJobConfException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when the jobconf is missing some mandatory
+ attributes or the value of some attributes is invalid.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <!-- start class org.apache.hadoop.mapred.IsolationRunner -->
+ <class name="IsolationRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IsolationRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Run a single task.
+ @param args the first argument is the task directory]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.IsolationRunner -->
+ <!-- start class org.apache.hadoop.mapred.JobClient -->
+ <class name="JobClient" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobClient"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job client.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client with the given {@link JobConf}, and connect to the
+ default {@link JobTracker}.
+
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client, connect to the indicated job tracker.
+
+ @param jobTrackAddr the job tracker to connect to.
+ @param conf configuration.]]>
+ </doc>
+ </constructor>
+ <method name="getCommandLineConfig" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the command line configuration.]]>
+ </doc>
+ </method>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Connect to the default {@link JobTracker}.
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the <code>JobClient</code>.]]>
+ </doc>
+ </method>
+ <method name="getFs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a filesystem handle. We need this to prepare jobs
+ for submission to the MapReduce system.
+
+ @return the filesystem handle.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobFile" type="java.lang.String"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param jobFile the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param job the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a {@link RunningJob} object to track an ongoing job. Returns
+ null if the id does not correspond to any known job.
+
+ @param jobid the jobid of the job.
+ @return the {@link RunningJob} handle to track the job, null if the
+ <code>jobid</code> doesn't correspond to any known job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getJob(JobID)}.">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getJob(JobID)}.]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the map tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the map tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getMapTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getMapTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the reduce tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the reduce tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getReduceTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getReduceTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the Map-Reduce cluster.
+
+ @return the status information about the Map-Reduce cluster as an object
+ of {@link ClusterStatus}.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are not completed and not failed.
+
+ @return array of {@link JobStatus} for the running/to-be-run jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are submitted.
+
+ @return array of {@link JobStatus} for the submitted jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Utility that submits a job, then polls for progress until the job is
+ complete.
+
+ @param job the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Sets the output filter for tasks. Only those tasks whose
+ output matches the filter are printed.
+ @param newValue task filter.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the task output filter out of the JobConf.
+
+ @param job the JobConf to examine.
+ @return the filter level.]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Modify the JobConf to set the task output filter.
+
+ @param job the JobConf to modify.
+ @param newValue the value to set.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns task output filter.
+ @return task filter.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getDefaultMaps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Maps in the cluster.
+
+ @return the max available Maps in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDefaultReduces" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Reduces in the cluster.
+
+ @return the max available Reduces in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Grab the jobtracker system directory path where job-specific files are to be placed.
+
+ @return the system directory where job-specific files are to be placed.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[<code>JobClient</code> is the primary interface for the user-job to interact
+ with the {@link JobTracker}.
+
+ <code>JobClient</code> provides facilities to submit jobs, track their
+ progress, access component-tasks' reports/logs, get the Map-Reduce cluster
+ status information etc.
+
+ <p>The job submission process involves:
+ <ol>
+ <li>
+ Checking the input and output specifications of the job.
+ </li>
+ <li>
+ Computing the {@link InputSplit}s for the job.
+ </li>
+ <li>
+ Setting up the requisite accounting information for the {@link DistributedCache}
+ of the job, if necessary.
+ </li>
+ <li>
+ Copying the job's jar and configuration to the map-reduce system directory
+ on the distributed file-system.
+ </li>
+ <li>
+ Submitting the job to the <code>JobTracker</code> and optionally monitoring
+ its status.
+ </li>
+ </ol></p>
+
+ Normally the user creates the application, describes various facets of the
+ job via {@link JobConf} and then uses the <code>JobClient</code> to submit
+ the job and monitor its progress.
+
+ <p>Here is an example on how to use <code>JobClient</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ job.setInputPath(new Path("in"));
+ job.setOutputPath(new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ </pre></blockquote></p>
+
+ <h4 id="JobControl">Job Control</h4>
+
+ <p>At times, clients need to chain map-reduce jobs to accomplish complex
+ tasks which cannot be done via a single map-reduce job. This is fairly easy
+ since the output of a job typically goes to the distributed file-system,
+ and that output can, in turn, be used as the input for the next job.</p>
+
+ <p>However, this also means that the onus of ensuring jobs are complete
+ (success/failure) lies squarely on the clients. In such situations the
+ various job-control options are:
+ <ol>
+ <li>
+ {@link #runJob(JobConf)} : submits the job and returns only after
+ the job has completed.
+ </li>
+ <li>
+ {@link #submitJob(JobConf)} : only submits the job; the client then polls
+ the returned handle to the {@link RunningJob} to query status and make
+ scheduling decisions (see the sketch below).
+ </li>
+ <li>
+ {@link JobConf#setJobEndNotificationURI(String)} : sets up a notification
+ on job-completion, thus avoiding polling.
+ </li>
+ </ol></p>
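+ 
+ <p>A minimal sketch of the second option, assuming <tt>job</tt> is a fully
+ configured {@link JobConf} (exception handling omitted):</p>
+ <p><blockquote><pre>
+ JobClient jc = new JobClient(job);
+ RunningJob rj = jc.submitJob(job);
+ while (!rj.isComplete()) {
+   Thread.sleep(5000);   // poll every five seconds
+ }
+ if (!rj.isSuccessful()) {
+   System.err.println("Job " + rj.getJobID() + " failed");
+ }
+ </pre></blockquote></p>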
+
+ @see JobConf
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient -->
+ <!-- start class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <class name="JobClient.TaskStatusFilter" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobClient.TaskStatusFilter&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <!-- start class org.apache.hadoop.mapred.JobConf -->
+ <class name="JobConf" extends="org.apache.hadoop.conf.Configuration"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <method name="getJar" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user jar for the map-reduce job.
+
+ @return the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJar"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jar" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user jar for the map-reduce job.
+
+ @param jar the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJarByClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the job's jar file by finding an example class location.
+
+ @param cls the example class.]]>
+ </doc>
+ </method>
+ <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link JobClient#getSystemDir()} instead.
+ Get the system directory where job-specific files are to be placed.">
+ <doc>
+ <![CDATA[@deprecated Use {@link JobClient#getSystemDir()} instead.
+ Get the system directory where job-specific files are to be placed.
+
+ @return the system directory where job-specific files are to be placed.]]>
+ </doc>
+ </method>
+ <method name="getLocalDirs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="subdir" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a local file name. Files are distributed among configured
+ local directories.]]>
+ </doc>
+ </method>
+ <method name="setInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or
+ {@link FileInputFormat#setInputPaths(JobConf, String)}">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the input directory for the map-reduce job.
+
+ @param dir the {@link Path} of the input directory for the map-reduce job.
+ @deprecated Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or
+ {@link FileInputFormat#setInputPaths(JobConf, String)}]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#addInputPath(JobConf, Path)} or
+ {@link FileInputFormat#addInputPaths(JobConf, String)}">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
+ @param dir {@link Path} to be added to the list of inputs for
+ the map-reduce job.
+ @deprecated Use {@link FileInputFormat#addInputPath(JobConf, Path)} or
+ {@link FileInputFormat#addInputPaths(JobConf, String)}]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileInputFormat#getInputPaths(JobConf)}">
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @return the list of input {@link Path}s for the map-reduce job.
+ @deprecated Use {@link FileInputFormat#getInputPaths(JobConf)}]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reported username for this job.
+
+ @return the username]]>
+ </doc>
+ </method>
+ <method name="setUser"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the reported username for this job.
+
+ @param user the username for this job.]]>
+ </doc>
+ </method>
+ <method name="setKeepFailedTaskFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the framework should keep the intermediate files for
+ failed tasks.
+
+ @param keep <code>true</code> if framework should keep the intermediate files
+ for failed tasks, <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="getKeepFailedTaskFiles" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should the temporary files for failed tasks be kept?
+
+ @return should the files be kept?]]>
+ </doc>
+ </method>
+ <method name="setKeepTaskFilesPattern"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pattern" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set a regular expression for task names that should be kept.
+ The regular expression ".*_m_000123_0" would keep the files
+ for the first instance of map 123 that ran.
+
+ @param pattern the java.util.regex.Pattern to match against the
+ task names.]]>
+ </doc>
+ </method>
+ <method name="getKeepTaskFilesPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the regular expression that is matched against the task names
+ to see if we need to keep the files.
+
+ @return the pattern as a string, if it was set, otherwise null.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the default file system.
+
+ @param dir the new current working directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the default file system.
+
+ @return the directory name.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat#getOutputPath(JobConf)} or
+ {@link FileOutputFormat#getWorkOutputPath(JobConf)}
+ Get the {@link Path} to the output directory for the map-reduce job.">
+ <doc>
+ <![CDATA[@deprecated Use {@link FileOutputFormat#getOutputPath(JobConf)} or
+ {@link FileOutputFormat#getWorkOutputPath(JobConf)}
+ Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat#setOutputPath(JobConf, Path)}
+ Set the {@link Path} of the output directory for the map-reduce job.
+
+ &lt;p>&lt;i>Note&lt;/i>:
+ &lt;/p>">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[@deprecated Use {@link FileOutputFormat#setOutputPath(JobConf, Path)}
+ Set the {@link Path} of the output directory for the map-reduce job.
+
+ <p><i>Note</i>:
+ </p>
+ @param dir the {@link Path} of the output directory for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputFormat" return="org.apache.hadoop.mapred.InputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link InputFormat} implementation for the map-reduce job,
+ defaults to {@link TextInputFormat} if not specified explicitly.
+
+ @return the {@link InputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link InputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link InputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="getOutputFormat" return="org.apache.hadoop.mapred.OutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link OutputFormat} implementation for the map-reduce job,
+ defaults to {@link TextOutputFormat} if not specified explicitly.
+
+ @return the {@link OutputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setOutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link OutputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link OutputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="setCompressMapOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Should the map outputs be compressed before transfer?
+ Uses the SequenceFile compression.
+
+ @param compress should the map outputs be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressMapOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Are the outputs of the maps to be compressed?
+
+ @return <code>true</code> if the outputs of the maps are to be compressed,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="{@link CompressionType} is no longer valid for intermediate
+ map-outputs.">
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the map outputs.
+
+ @param style the {@link CompressionType} to control how the map outputs
+ are compressed.
+ @deprecated {@link CompressionType} is no longer valid for intermediate
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="{@link CompressionType} is no longer valid for intermediate
+ map-outputs.">
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the map outputs.
+
+ @return the {@link CompressionType} for map outputs, defaulting to
+ {@link CompressionType#RECORD}.
+ @deprecated {@link CompressionType} is no longer valid for intermediate
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the given class as the {@link CompressionCodec} for the map outputs.
+
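+ <p>A sketch of enabling map-output compression with a specific codec;
+ gzip is used here purely as an example:</p>
+ <p><blockquote><pre>
+ job.setCompressMapOutput(true);
+ job.setMapOutputCompressorClass(
+   org.apache.hadoop.io.compress.GzipCodec.class);
+ </pre></blockquote></p>
+ 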
+ @param codecClass the {@link CompressionCodec} class that will compress
+ the map outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the map outputs.
+
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} class that should be used to compress the
+ map outputs.
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getMapOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the map output data. If it is not set, use the
+ (final) output key class. This allows the map output key class to be
+ different than the final output key class.
+
+ @return the map output key class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the map output data. This allows the user to
+ specify the map output key class to be different than the final output
+ key class.
+
+ @param theClass the map output key class.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for the map output data. If it is not set, use the
+ (final) output value class. This allows the map output value class to be
+ different than the final output value class.
+
+ @return the map output value class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for the map output data. This allows the user to
+ specify the map output value class to be different than the final output
+ value class.
+
+ @param theClass the map output value class.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the job output data.
+
+ @return the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the job output data.
+
+ @param theClass the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link RawComparator} comparator used to compare keys.
+
+ @return the {@link RawComparator} comparator used to compare keys.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyComparatorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link RawComparator} comparator used to compare keys.
+
+ @param theClass the {@link RawComparator} comparator used to
+ compare keys.
+ @see #setOutputValueGroupingComparator(Class)]]>
+ </doc>
+ </method>
+ <method name="getOutputValueGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user defined {@link RawComparator} comparator for
+ grouping keys of inputs to the reduce.
+
+ @return comparator set by the user for grouping values.
+ @see #setOutputValueGroupingComparator(Class) for details.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueGroupingComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the user defined {@link RawComparator} comparator for
+ grouping keys in the input to the reduce.
+
+ <p>This comparator should be provided if the equivalence rules for keys
+ for sorting the intermediates are different from those for grouping keys
+ before each call to
+ {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
+
+ <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
+ in a single call to the reduce function if K1 and K2 compare as equal.</p>
+
+ <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
+ how keys are sorted, this can be used in conjunction to simulate
+ <i>secondary sort on values</i>.</p>
+
+ <p><i>Note</i>: This is not a guarantee of the reduce sort being
+ <i>stable</i> in any sense. (In any case, with the order of available
+ map-outputs to the reduce being non-deterministic, it wouldn't make
+ that much sense.)</p>
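+ 
+ <p>A sketch of the wiring for such a secondary sort; both comparator
+ classes here are hypothetical, one ordering on the full composite key and
+ the other only on its primary part:</p>
+ <p><blockquote><pre>
+ // Sort by (primary, secondary) so values arrive ordered by secondary...
+ job.setOutputKeyComparatorClass(FullKeyComparator.class);
+ // ...but group reduce calls by the primary part alone.
+ job.setOutputValueGroupingComparator(PrimaryPartComparator.class);
+ </pre></blockquote></p>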
+
+ @param theClass the comparator class to be used for grouping keys.
+ It should implement <code>RawComparator</code>.
+ @see #setOutputKeyComparatorClass(Class)]]>
+ </doc>
+ </method>
+ <method name="getOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for job outputs.
+
+ @return the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for job outputs.
+
+ @param theClass the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapperClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Mapper} class for the job.
+
+ @return the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapperClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Mapper} class for the job.
+
+ @param theClass the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getMapRunnerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link MapRunnable} class for the job.
+
+ @return the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapRunnerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"/>
+ <doc>
+ <![CDATA[Expert: Set the {@link MapRunnable} class for the job.
+
+ Typically used to exert greater control on {@link Mapper}s.
+
+ @param theClass the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getPartitionerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Partitioner} used to partition {@link Mapper}-outputs
+ to be sent to the {@link Reducer}s.
+
+ @return the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setPartitionerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Partitioner} class used to partition
+ {@link Mapper}-outputs to be sent to the {@link Reducer}s.
+
+ @param theClass the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getReducerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Reducer} class for the job.
+
+ @return the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setReducerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Reducer} class for the job.
+
+ @param theClass the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getCombinerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers. Typically the combiner is the same as
+ the {@link Reducer} for the job, i.e. {@link #getReducerClass()}.
+
+ @return the user-defined combiner class used to combine map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCombinerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers.
+
+ <p>The combiner is a task-level aggregation operation which, in some cases,
+ helps to cut down the amount of data transferred from the {@link Mapper} to
+ the {@link Reducer}, leading to better performance.</p>
+
+ <p>Typically the combiner is the same as the <code>Reducer</code> for the
+ job, i.e. {@link #setReducerClass(Class)}.</p>
+
+ @param theClass the user-defined combiner class used to combine
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCombineOnceOnly"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[If true, ensures the combiner is run once and only once on output from
+ the map. Otherwise, the combiner may be run zero or more times.]]>
+ </doc>
+ </method>
+ <method name="getCombineOnceOnly" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be used for this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on, else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getMapSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for map tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be
+ used for this job for map tasks,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for map tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for map tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getReduceSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for reduce tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be used
+ for reduce tasks for this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setReduceSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for reduce tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for reduce tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getNumMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of map tasks for this job.
+ Defaults to <code>1</code>.
+
+ @return the number of map tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumMapTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the number of map tasks for this job.
+
+ <p><i>Note</i>: This is only a <i>hint</i> to the framework. The actual
+ number of spawned map tasks depends on the number of {@link InputSplit}s
+ generated by the job's {@link InputFormat#getSplits(JobConf, int)}.
+
+ A custom {@link InputFormat} is typically used to accurately control
+ the number of map tasks for the job.</p>
+
+ <h4 id="NoOfMaps">How many maps?</h4>
+
+ <p>The number of maps is usually driven by the total size of the inputs
+ i.e. total number of blocks of the input files.</p>
+
+ <p>The right level of parallelism for maps seems to be around 10-100 maps
+ per-node, although it has been set up to 300 or so for very cpu-light map
+ tasks. Task setup takes a while, so it is best if the maps take at least a
+ minute to execute.</p>
+
+ <p>The default behavior of file-based {@link InputFormat}s is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of input files. However, the {@link FileSystem} blocksize of the
+ input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB,
+ you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is
+ used to set it even higher.</p>
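+ 
+ <p>A back-of-the-envelope check of the figure above:</p>
+ <p><blockquote><pre>
+ long input = 10L * 1024 * 1024 * 1024 * 1024; // 10TB in bytes
+ long block = 128L * 1024 * 1024;              // 128MB block size
+ long maps  = input / block;                   // 81,920, i.e. ~82,000 maps
+ </pre></blockquote></p>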
+
+ @param n the number of map tasks for this job.
+ @see InputFormat#getSplits(JobConf, int)
+ @see FileInputFormat
+ @see FileSystem#getDefaultBlockSize()
+ @see FileStatus#getBlockSize()]]>
+ </doc>
+ </method>
+ <method name="getNumReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of reduce tasks for this job. Defaults to
+ <code>1</code>.
+
+ @return the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumReduceTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the requisite number of reduce tasks for this job.
+
+ <h4 id="NoOfReduces">How many reduces?</h4>
+
+ <p>The right number of reduces seems to be <code>0.95</code> or
+ <code>1.75</code> multiplied by (&lt;<i>no. of nodes</i>&gt; *
+ <a href="{@docRoot}/../hadoop-default.html#mapred.tasktracker.reduce.tasks.maximum">
+ mapred.tasktracker.reduce.tasks.maximum</a>).
+ </p>
+
+ <p>With <code>0.95</code> all of the reduces can launch immediately and
+ start transferring map outputs as the maps finish. With <code>1.75</code>
+ the faster nodes will finish their first round of reduces and launch a
+ second wave of reduces doing a much better job of load balancing.</p>
+
+ <p>Increasing the number of reduces increases the framework overhead, but
+ improves load balancing and lowers the cost of failures.</p>
+
+ <p>The scaling factors above are slightly less than whole numbers to
+ reserve a few reduce slots in the framework for speculative-tasks, failures
+ etc.</p>
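+ 
+ <p>For example, with 20 nodes and
+ <code>mapred.tasktracker.reduce.tasks.maximum</code> set to 2 (illustrative
+ numbers), the two rules of thumb give:</p>
+ <p><blockquote><pre>
+ // 0.95 * 20 * 2 = 38 reduces -- a single wave
+ // 1.75 * 20 * 2 = 70 reduces -- two waves, better load balancing
+ job.setNumReduceTasks(38);
+ </pre></blockquote></p>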
+
+ <h4 id="ReducerNone">Reducer NONE</h4>
+
+ <p>It is legal to set the number of reduce-tasks to <code>zero</code>.</p>
+
+ <p>In this case the output of the map-tasks goes directly to the distributed
+ file-system, to the path set by
+ {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the
+ framework doesn't sort the map-outputs before writing them out to HDFS.</p>
+
+ @param n the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ map task, as specified by the <code>mapred.map.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ map task.
+
+ @param n the number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ reduce task, as specified by the <code>mapred.reduce.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ reduce task.
+
+ @param n the number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name. This is only used to identify the
+ job to the user.
+
+ @return the job's name, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified job name.
+
+ @param name the job's new name.]]>
+ </doc>
+ </method>
+ <method name="getSessionId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified session identifier. The default is the empty string.
+
+ The session identifier is used to tag metric data that is reported to some
+ performance metrics system via the org.apache.hadoop.metrics API. The
+ session identifier is intended, in particular, for use by Hadoop-On-Demand
+ (HOD) which allocates a virtual Hadoop cluster dynamically and transiently.
+ HOD will set the session identifier by modifying the hadoop-site.xml file
+ before starting the cluster.
+
+ When not running under HOD, this identifier is expected to remain set to
+ the empty string.
+
+ @return the session identifier, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setSessionId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sessionId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified session identifier.
+
+ @param sessionId the new session id.]]>
+ </doc>
+ </method>
+ <method name="setMaxTaskFailuresPerTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="noFailures" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds <code>noFailures</code>, the
+ tasktracker is <i>blacklisted</i> for this job.
+
+ @param noFailures maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxTaskFailuresPerTracker" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Expert: Get the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds this, the tasktracker is
+ <i>blacklisted</i> for this job.
+
+ @return the maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of map tasks that can fail without
+ the job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed map-task results in
+ the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the maximum percentage of map tasks that can fail without the
+ job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts} attempts
+ before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of reduce tasks that can fail without
+ the job being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed reduce-task results
+ in the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum percentage of reduce tasks that can fail without the job
+ being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="prio" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Set {@link JobPriority} for this job.
+
+ @param prio the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link JobPriority} for this job.
+
+ @return the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getProfileEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get whether task profiling is enabled.
+ @return true if some tasks will be profiled]]>
+ </doc>
+ </method>
+ <method name="setProfileEnabled"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the system should collect profiler information for some of
+ the tasks in this job. The information is stored in the user log
+ directory.
+ @param newValue true means it should be gathered]]>
+ </doc>
+ </method>
+ <method name="getProfileParams" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the profiler configuration arguments.
+
+ The default value for this property is
+ "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
+
+ @return the parameters to pass to the task child to configure profiling]]>
+ </doc>
+ </method>
+ <method name="setProfileParams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the profiler configuration arguments. If the string contains a '%s' it
+ will be replaced with the name of the profiling output file when the task
+ runs.
+
+ This value is passed to the task child JVM on the command line.
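+ 
+ <p>For example, to set the documented default explicitly (a sketch):</p>
+ <p><blockquote><pre>
+ job.setProfileParams(
+   "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s");
+ </pre></blockquote></p>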
+
+ @param value the configuration string]]>
+ </doc>
+ </method>
+ <method name="getProfileTaskRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <doc>
+ <![CDATA[Get the range of maps or reduces to profile.
+ @param isMap is the task a map?
+ @return the task ranges]]>
+ </doc>
+ </method>
+ <method name="setProfileTaskRange"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <param name="newValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the ranges of maps or reduces to profile. setProfileEnabled(true)
+ must also be called.
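+ 
+ <p>For example (an illustrative range):</p>
+ <p><blockquote><pre>
+ job.setProfileEnabled(true);
+ job.setProfileTaskRange(true, "0-2"); // profile map tasks 0, 1 and 2
+ </pre></blockquote></p>
+ 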
+ @param newValue a set of integer ranges of the task ids]]>
+ </doc>
+ </method>
+ <method name="setMapDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the map tasks fail.
+
+ <p>The debug script can aid debugging of failed map tasks. The script is
+ given the task's stdout, stderr, syslog and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the map failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script needs to be symlinked. </p>
+
+ <p>Here is an example of how to submit a script:</p>
+ <p><blockquote><pre>
+ job.setMapDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param mDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getMapDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the map task's debug script.
+
+ @return the debug Script for the mapred job for failed map tasks.
+ @see #setMapDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="setReduceDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the reduce tasks fail.
+
+ <p>The debug script can aid debugging of failed reduce tasks. The script
+ is given the task's stdout, stderr, syslog and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the reduce failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script file needs to be symlinked.</p>
+
+ <p>Here is an example of how to submit a script:</p>
+ <p><blockquote><pre>
+ job.setReduceDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param rDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getReduceDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reduce task's debug script.
+
+ @return the debug script for the mapred job for failed reduce tasks.
+ @see #setReduceDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="getJobEndNotificationURI" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ @return the job end notification uri, <code>null</code> if it hasn't
+ been set.
+ @see #setJobEndNotificationURI(String)]]>
+ </doc>
+ </method>
+ <method name="setJobEndNotificationURI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ <p>The uri can contain 2 special parameters: <tt>$jobId</tt> and
+ <tt>$jobStatus</tt>. Those, if present, are replaced by the job's
+ identifier and completion-status respectively.</p>
+
+ <p>This is typically used by application-writers to implement chaining of
+ Map-Reduce jobs in an <i>asynchronous manner</i>.</p>
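+ 
+ <p>For example (host and path are hypothetical):</p>
+ <p><blockquote><pre>
+ job.setJobEndNotificationURI(
+   "http://myserver:8080/jobnotify?id=$jobId&status=$jobStatus");
+ </pre></blockquote></p>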
+
+ @param uri the job end notification uri
+ @see JobStatus
+ @see <a href="{@docRoot}/org/apache/hadoop/mapred/JobClient.html#JobCompletionAndChaining">Job Completion and Chaining</a>]]>
+ </doc>
+ </method>
+ <method name="getJobLocalDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job-specific shared directory for use as scratch space.
+
+ <p>When a job starts, a shared directory is created at
+ <code>${mapred.local.dir}/taskTracker/jobcache/$jobid/work/</code>.
+ This directory is exposed to the users through
+ <code>job.local.dir</code>, so tasks can use it as scratch space and
+ share files among themselves.</p>
+ This value is also available as a system property.
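+ 
+ <p>A minimal sketch of reading it from within a task:</p>
+ <p><blockquote><pre>
+ String jobLocalDir = job.get("job.local.dir");
+ // equivalently, via the system property of the same name:
+ String sameDir = System.getProperty("job.local.dir");
+ </pre></blockquote></p>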
+
+ @return The localized job specific shared directory]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A map/reduce job configuration.
+
+ <p><code>JobConf</code> is the primary interface for a user to describe a
+ map-reduce job to the Hadoop framework for execution. The framework tries to
+ faithfully execute the job as described by <code>JobConf</code>, however:
+ <ol>
+ <li>
+ Some configuration parameters might have been marked as
+ <a href="{@docRoot}/org/apache/hadoop/conf/Configuration.html#FinalParams">
+ final</a> by administrators and hence cannot be altered.
+ </li>
+ <li>
+ While some job parameters are straightforward to set
+ (e.g. {@link #setNumReduceTasks(int)}), some parameters interact subtly with
+ the rest of the framework and/or job-configuration and are relatively more
+ complex for the user to control finely (e.g. {@link #setNumMapTasks(int)}).
+ </li>
+ </ol></p>
+
+ <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner
+ (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and
+ {@link OutputFormat} implementations to be used, etc.</p>
+
+ <p>Optionally <code>JobConf</code> is used to specify other advanced facets
+ of the job such as the <code>Comparator</code>s to be used, files to be put in
+ the {@link DistributedCache}, whether or not intermediate and/or job outputs
+ are to be compressed (and how), and debuggability via user-provided scripts
+ ({@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)})
+ for post-processing task logs: the task's stdout, stderr and syslog.</p>
+
+ <p>Here is an example on how to configure a job via <code>JobConf</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ FileInputFormat.setInputPaths(job, new Path("in"));
+ FileOutputFormat.setOutputPath(job, new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setCombinerClass(MyJob.MyReducer.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ job.setInputFormat(SequenceFileInputFormat.class);
+ job.setOutputFormat(SequenceFileOutputFormat.class);
+ </pre></blockquote></p>
+
+ @see JobClient
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobConf -->
+ <!-- start interface org.apache.hadoop.mapred.JobConfigurable -->
+ <interface name="JobConfigurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Initializes a new instance from a {@link JobConf}.
+
+ @param job the configuration]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link JobConf}.
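+ 
+ <p>A minimal sketch of an implementing class (names and the configuration
+ parameter are hypothetical):</p>
+ <p><blockquote><pre>
+ public class MyFilter implements JobConfigurable {
+   private int threshold;
+   public void configure(JobConf job) {
+     // read a job parameter when the framework initializes this instance
+     threshold = job.getInt("my.filter.threshold", 10);
+   }
+ }
+ </pre></blockquote></p>]]>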
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobConfigurable -->
+ <!-- start class org.apache.hadoop.mapred.JobEndNotifier -->
+ <class name="JobEndNotifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobEndNotifier"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="startNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="stopNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="registerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ <method name="localRunnerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobEndNotifier -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory -->
+ <class name="JobHistory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="hostname" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Initialize JobHistory files.
+ @param conf JobConf of the job tracker.
+ @param hostname jobtracker's hostname
+ @return true if initialized properly,
+ false otherwise]]>
+ </doc>
+ </method>
+ <method name="parseHistoryFromFS"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="l" type="org.apache.hadoop.mapred.JobHistory.Listener"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parses a history file and invokes Listener.handle() for
+ each line of history. It can be used to look through history
+ files for specific items without having to keep the whole history in memory.
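+ 
+ <p>A minimal sketch of a listener that only watches job-level records
+ (assumes a path string and a FileSystem fs are in scope; the record and
+ key names are indicative):</p>
+ <p><blockquote><pre>
+ JobHistory.parseHistoryFromFS(path, new JobHistory.Listener() {
+   public void handle(JobHistory.RecordTypes recType,
+                      Map<JobHistory.Keys, String> values) throws IOException {
+     if (recType == JobHistory.RecordTypes.Job) {
+       System.out.println(values.get(JobHistory.Keys.JOBID));
+     }
+   }
+ }, fs);
+ </pre></blockquote></p>
+ 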
+ @param path path to history file
+ @param l Listener for history events
+ @param fs FileSystem where history file is present
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isDisableHistory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the history disable status. By default history is enabled, so this
+ method returns false.
+ @return true if history logging is disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="setDisableHistory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="disableHistory" type="boolean"/>
+ <doc>
+ <![CDATA[Enable/disable history logging. The default value is false, so history
+ is enabled by default.
+ @param disableHistory true if history should be disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <field name="JOBTRACKER_START_TIME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provides methods for writing to and reading from job history.
+ Job history works in append mode; JobHistory and its inner classes provide methods
+ to log job events.
+
+ JobHistory is split into multiple files; the format of each file is plain text, where
+ each line is of the form [type (key=value)*] and type identifies the type of the
+ record. Each type maps to the UID of one of the inner classes of this class.
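+ 
+ An illustrative line (the key names follow the Keys enum; the values are
+ hypothetical):
+ 
+ Task TASKID="task_200811101010_0001_m_000001" TASK_TYPE="MAP" START_TIME="1226314860000"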
+
+ Job history is maintained in a master index which contains start/stop times of all jobs
+ and a few other job-level properties. Apart from this, each job's history is maintained
+ in a separate history file. The name of a job history file follows the format
+ jobtrackerId_jobid.
+
+ For parsing the job history it supports a listener-based interface where each line is
+ parsed and passed to the listener. The listener can create an object model of the
+ history or look for specific events and discard the rest of the history.
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <class name="JobHistory.HistoryCleaner" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobHistory.HistoryCleaner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Cleans up history data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Delete history files older than one month. Update master index and remove all
+ jobs older than one month. Also if a job tracker has no jobs in last one month
+ remove reference to the job tracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <class name="JobHistory.JobInfo" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.JobInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new JobInfo.]]>
+ </doc>
+ </constructor>
+ <method name="getAllTasks" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.Task&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all map and reduce tasks as a <taskid, Task> map.]]>
+ </doc>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Get the path of the locally stored job file
+ @param jobId id of the job
+ @return the path of the job file on the local file system]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFile" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the path of the job-history
+ log file.
+
+ @param logFile path of the job-history file
+ @return URL encoded path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL encoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="decodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to decode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL decoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logSubmitted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="jobConfPath" type="java.lang.String"/>
+ <param name="submitTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="logSubmitted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="jobConfPath" type="java.lang.String"/>
+ <param name="submitTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Log job submitted event to history. Creates a new file in history
+ for the job. If history file creation fails, it disables history
+ for all other events.
+ @param jobId job id assigned by job tracker.
+ @param jobConf job conf of the job
+ @param jobConfPath path to job conf xml file in HDFS.
+ @param submitTime time when job tracker received the job
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs launch time of job.
+ @param jobId job id, assigned by jobtracker.
+ @param startTime start time of job.
+ @param totalMaps total maps assigned by jobtracker.
+ @param totalReduces total reduces.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <param name="failedMaps" type="int"/>
+ <param name="failedReduces" type="int"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="finishTime" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <param name="failedMaps" type="int"/>
+ <param name="failedReduces" type="int"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log job finished event. Closes the job file in history.
+ @param jobId job id, assigned by jobtracker.
+ @param finishTime finish time of job in ms.
+ @param finishedMaps no. of maps successfully finished.
+ @param finishedReduces no. of reduces finished successfully.
+ @param failedMaps no. of failed map tasks.
+ @param failedReduces no. of failed reduce tasks.
+ @param counters the counters from the job]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs job failed event. Closes the job history log file.
+ @param jobid job id
+ @param timestamp time when job failure was detected in ms.
+ @param finishedMaps no. of finished map tasks.
+ @param finishedReduces no. of finished reduce tasks.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to job start, finish or failure.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <class name="JobHistory.Keys" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Keys&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Keys[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Keys"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Job history files contain key="value" pairs, where keys belong to this enum.
+ It acts as a global namespace for all keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <!-- start interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <interface name="JobHistory.Listener" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="handle"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recType" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"/>
+ <param name="values" type="java.util.Map&lt;org.apache.hadoop.mapred.JobHistory.Keys, java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Callback method for history parser.
+ @param recType type of record, which is the first entry in the line.
+ @param values a map of key-value pairs as they appear in history.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Callback interface for reading back log events from JobHistory. This interface
+ should be implemented and passed to JobHistory.parseHistoryFromFS().]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
+ <class name="JobHistory.MapAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.MapAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of this map task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time of task attempt as reported by task tracker.
+ @param hostName host name of the task attempt.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="host" type="java.lang.String"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finish time of map task attempt.
+ @param taskAttemptId task attempt id
+ @param finishTime finish time
+ @param hostName host name]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="host" type="java.lang.String"/>
+ <param name="err" type="java.lang.String"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt failed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt killed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+ a Map Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <class name="JobHistory.RecordTypes" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.RecordTypes&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.RecordTypes[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Record types are identifiers for each line of log in history files.
+ A record type appears as the first token in a single line of log.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
+ <class name="JobHistory.ReduceAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.ReduceAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of Reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time
+ @param hostName host name]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finished event of this task.
+ @param taskAttemptId task attempt id
+ @param shuffleFinished shuffle finish time
+ @param sortFinished sort finish time
+ @param finishTime finish time of task
+ @param hostName host name where task attempt executed]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log failed reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task failed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="attemptid" type="java.lang.String"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log killed reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when the task was killed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+ a Reduce Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
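+ <!-- Usage sketch for the static ReduceAttempt logging methods above. The attempt
+ id, host name and timestamps are hypothetical; TaskAttemptID.forName is assumed
+ to parse the standard attempt-id string format.
+
+ TaskAttemptID attempt =
+ TaskAttemptID.forName("attempt_200707121733_0003_r_000000_0");
+ long start = System.currentTimeMillis();
+ JobHistory.ReduceAttempt.logStarted(attempt, start, "worker-node-1");
+ // ... run the reduce, recording shuffle and sort finish times ...
+ long shuffleDone = System.currentTimeMillis();
+ long sortDone = System.currentTimeMillis();
+ JobHistory.ReduceAttempt.logFinished(attempt, shuffleDone, sortDone,
+ System.currentTimeMillis(), "worker-node-1");
+ -->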
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Task -->
+ <class name="JobHistory.Task" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.Task"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="java.lang.String"/>
+ <param name="taskId" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="splitLocations" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of task (TIP).
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param startTime start time of the tip
+ @param splitLocations split locations of the task]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finish time of task.
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param finishTime finish time of task in ms]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task failed event.
+ @param taskId task id
+ @param taskType MAP or REDUCE.
+ @param time timestamp when the task failure was detected.
+ @param error error message for failure.]]>
+ </doc>
+ </method>
+ <method name="getTaskAttempts" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.TaskAttempt&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all task attempts for this task. <task attempt id - TaskAttempt>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to Task's start, finish or failure.
+ All events logged by this class are logged in a separate file per job in
+ job tracker history. These events map to TIPs in jobtracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Task -->
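+ <!-- Usage sketch for the static Task (TIP) logging methods above. The task id,
+ split locations and counters are hypothetical; taskType follows the MAP/REDUCE
+ convention noted in the docs.
+
+ TaskID tip = TaskID.forName("task_200707121733_0003_m_000000");
+ JobHistory.Task.logStarted(tip, "MAP", System.currentTimeMillis(), "");
+ Counters counters = new Counters(); // normally the task's real counters
+ JobHistory.Task.logFinished(tip, "MAP", System.currentTimeMillis(), counters);
+ -->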
+ <!-- start class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <class name="JobHistory.TaskAttempt" extends="org.apache.hadoop.mapred.JobHistory.Task"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.TaskAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Base class for Map and Reduce TaskAttempts.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Values -->
+ <class name="JobHistory.Values" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Values&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Values[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Values"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[This enum contains some of the values commonly used by history log events.
+ Since values in the history can only be strings, Values.name() is used in
+ most places in the history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Values -->
+ <!-- start class org.apache.hadoop.mapred.JobID -->
+ <class name="JobID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobID" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a JobID object
+ @param jtIdentifier jobTracker identifier
+ @param id job number]]>
+ </doc>
+ </constructor>
+ <method name="getJtIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare JobIDs first by jtIdentifier, then by job number]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a JobID object from a given string
+ @return constructed JobID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getJobIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches job IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>any job</i>
+ run on the jobtracker started at <i>200707121733</i>, we would use:
+ <pre>
+ JobID.getJobIDsPattern("200707121733", null);
+ </pre>
+ which will return:
+ <pre> "job_200707121733_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @return a regex pattern matching JobIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[JobID represents the immutable and unique identifier for
+ the job. JobID consists of two parts. First part
+ represents the jobtracker identifier, so that jobID to jobtracker map
+ is defined. For cluster setup this string is the jobtracker
+ start time, for local setting, it is "local".
+ Second part of the JobID is the job number. <br>
+ An example JobID is
+ <code>job_200707121733_0003</code>, which represents the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse JobID strings, but rather
+ use appropriate constructors or {@link #forName(String)} method.
+
+ @see TaskID
+ @see TaskAttemptID
+ @see JobTracker#getNewJobId()
+ @see JobTracker#getStartTime()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobID -->
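+ <!-- Usage sketch for JobID construction, parsing and pattern matching; the
+ identifier and job number are the same hypothetical values used in the docs.
+
+ JobID id = new JobID("200707121733", 3);
+ String s = id.toString(); // "job_200707121733_0003"
+ JobID parsed = JobID.forName(s); // round-trips the string form
+ String pattern = JobID.getJobIDsPattern("200707121733", null);
+ // pattern is "job_200707121733_[0-9]*"
+ -->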
+ <!-- start class org.apache.hadoop.mapred.JobPriority -->
+ <class name="JobPriority" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobPriority&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobPriority[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Used to describe the priority of the running job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobPriority -->
+ <!-- start class org.apache.hadoop.mapred.JobProfile -->
+ <class name="JobProfile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobProfile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an empty {@link JobProfile}.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, org.apache.hadoop.mapred.JobID, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a {@link JobProfile} from the userid, jobid,
+ job config-file, job-details url and job name.
+
+ @param user userid of the person who submitted the job.
+ @param jobid id of the job.
+ @param jobFile job configuration file.
+ @param url link to the web-ui for details of the job.
+ @param name user-specified job name.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="use JobProfile(String, JobID, String, String, String) instead">
+ <doc>
+ <![CDATA[@deprecated use JobProfile(String, JobID, String, String, String) instead]]>
+ </doc>
+ </constructor>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user id.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job id.]]>
+ </doc>
+ </method>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID() instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID() instead]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configuration file for the job.]]>
+ </doc>
+ </method>
+ <method name="getURL" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the link to the web-ui for details of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A JobProfile is a MapReduce primitive. Tracks a job,
+ whether living or dead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobProfile -->
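+ <!-- Usage sketch for JobProfile; all argument values below are hypothetical.
+
+ JobProfile profile = new JobProfile("alice", new JobID("200707121733", 3),
+ "/system/job_200707121733_0003/job.xml",
+ "http://jobtracker:50030/jobdetails.jsp?jobid=job_200707121733_0003",
+ "word count");
+ System.out.println(profile.getJobName() + " submitted by " + profile.getUser());
+ -->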
+ <!-- start class org.apache.hadoop.mapred.JobShell -->
+ <class name="JobShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run method from Tool]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Provides command line parsing for job submission.
+ A job submission looks like:
+ hadoop jar -libjars <comma separated jars> -archives <comma separated archives>
+ -files <comma separated files> inputjar args]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobShell -->
+ <!-- start class org.apache.hadoop.mapred.JobStatus -->
+ <class name="JobStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobStatus" type="java.lang.String, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job]]>
+ </doc>
+ </constructor>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID instead]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The jobid of the Job]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in maps]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in reduce]]>
+ </doc>
+ </method>
+ <method name="getRunState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return running state of the job]]>
+ </doc>
+ </method>
+ <method name="setRunState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Change the current run state of the job.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return start time of the job]]>
+ </doc>
+ </method>
+ <method name="getUsername" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the username of the job]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SUCCEEDED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PREP" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Describes the current status of a job. This is
+ not intended to be a comprehensive piece of data.
+ For that, look at JobProfile.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobStatus -->
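+ <!-- Usage sketch: interpreting a JobStatus (for example one returned by
+ JobTracker.getJobStatus). The state names and the assumption that
+ mapProgress()/reduceProgress() return fractions in [0,1] are illustrative.
+
+ static String describe(JobStatus status) {
+ int state = status.getRunState();
+ String name = state == JobStatus.RUNNING ? "running"
+ : state == JobStatus.SUCCEEDED ? "succeeded"
+ : state == JobStatus.FAILED ? "failed" : "prep";
+ // progress values assumed to be fractions in [0,1]
+ return name + " maps=" + (int) (status.mapProgress() * 100) + "%"
+ + " reduces=" + (int) (status.reduceProgress() * 100) + "%";
+ }
+ -->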
+ <!-- start class org.apache.hadoop.mapred.JobTracker -->
+ <class name="JobTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.InterTrackerProtocol"/>
+ <implements name="org.apache.hadoop.mapred.JobSubmissionProtocol"/>
+ <method name="startTracker" return="org.apache.hadoop.mapred.JobTracker"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker with given configuration.
+
+ The conf will be modified to reflect the actual ports on which
+ the JobTracker is up and running if the user passes the port as
+ <code>zero</code>.
+
+ @param conf configuration for the JobTracker.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Run forever]]>
+ </doc>
+ </method>
+ <method name="getTotalSubmissions" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobTrackerMachine" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTrackerIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the unique identifier (i.e. the timestamp) of this job tracker start.
+ @return a string with a unique identifier]]>
+ </doc>
+ </method>
+ <method name="getTrackerPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="runningJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRunningJobs" return="java.util.List&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version that is called from a timer thread, and therefore needs to be
+ careful to synchronize.]]>
+ </doc>
+ </method>
+ <method name="failedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="completedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="taskTrackers" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskTracker" return="org.apache.hadoop.mapred.TaskTrackerStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trackerID" type="java.lang.String"/>
+ </method>
+ <method name="resolveAndAddToTopology" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getNodesAtMaxLevel" return="java.util.Collection&lt;org.apache.hadoop.net.Node&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a collection of nodes at the max level]]>
+ </doc>
+ </method>
+ <method name="getParentNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <param name="level" type="int"/>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the Node in the network topology that corresponds to the hostname]]>
+ </doc>
+ </method>
+ <method name="getNumTaskCacheLevels" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumResolvedTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="heartbeat" return="org.apache.hadoop.mapred.HeartbeatResponse"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskTrackerStatus"/>
+ <param name="initialContact" type="boolean"/>
+ <param name="acceptNewTasks" type="boolean"/>
+ <param name="responseId" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The periodic heartbeat mechanism between the {@link TaskTracker} and
+ the {@link JobTracker}.
+
+ The {@link JobTracker} processes the status information sent by the
+ {@link TaskTracker} and responds with instructions to start/stop
+ tasks or jobs, and also 'reset' instructions during contingencies.]]>
+ </doc>
+ </method>
+ <method name="getFilesystemName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab the local fs name]]>
+ </doc>
+ </method>
+ <method name="reportTaskTrackerError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTracker" type="java.lang.String"/>
+ <param name="errorClass" type="java.lang.String"/>
+ <param name="errorMessage" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNewJobId" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Allocates a new JobID.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[JobTracker.submitJob() kicks off a new job.
+
+ Create a 'JobInProgress' object, which contains both JobProfile
+ and JobStatus. Those two sub-objects are sometimes shipped outside
+ of the JobTracker. But JobInProgress adds info that's useful for
+ the JobTracker alone.
+
+ We add the JIP to the jobInitQueue, which is processed
+ asynchronously to handle split-computation and build up
+ the right TaskTracker/Block mapping.]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ </method>
+ <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="fromid" type="int"/>
+ <param name="maxevents" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxEvents" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="tipid" type="java.lang.String"/>
+ <param name="taskid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the diagnostics for a given task
+ @param taskId the id of the task
+ @return an array of the diagnostic messages]]>
+ </doc>
+ </method>
+ <method name="getTip" return="org.apache.hadoop.mapred.TaskInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tipid" type="org.apache.hadoop.mapred.TaskID"/>
+ <doc>
+ <![CDATA[Returns specified TaskInProgress, or null.]]>
+ </doc>
+ </method>
+ <method name="killTask" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="killTask" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a Task to be killed]]>
+ </doc>
+ </method>
+ <method name="getAssignedTracker" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ </method>
+ <method name="getAssignedTracker" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Get tracker name for a given task id.
+ @param taskId the name of the task
+ @return The name of the task tracker]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSystemDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir()]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.JobInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.JobInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Get the localized job file path on the job tracker's local file system
+ @param jobId id of the job
+ @return the path of the job conf file on the local file system]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker process. This is used only for debugging. As a rule,
+ JobTracker should be run as part of the DFS Namenode process.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[JobTracker is the central location for submitting and
+ tracking MR jobs in a network environment.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker -->
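+ <!-- Usage sketch: starting a JobTracker in-process, per the startTracker and
+ offerService docs above. Call from a context that handles IOException and
+ InterruptedException; a port of zero in the conf is rewritten to the actual port.
+
+ JobConf conf = new JobConf();
+ JobTracker tracker = JobTracker.startTracker(conf);
+ tracker.offerService(); // blocks: "run forever"
+ -->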
+ <!-- start class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <class name="JobTracker.IllegalStateException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobTracker.IllegalStateException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A client tried to submit a job before the Job Tracker was ready.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <!-- start class org.apache.hadoop.mapred.JobTracker.State -->
+ <class name="JobTracker.State" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobTracker.State&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobTracker.State[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.State -->
+ <!-- start class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+ <class name="KeyValueLineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="findSeparator" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="sep" type="byte"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read key/value pair in a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class treats a line in the input as a key/value pair separated by a
+ separator character. The separator can be specified in the config file
+ under the attribute name key.value.separator.in.input.line. The default
+ separator is the tab character ('\t').]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
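+ <!-- Usage sketch for KeyValueLineRecordReader; the path, split length
+ (fileLength) and separator are hypothetical.
+
+ Configuration conf = new Configuration();
+ conf.set("key.value.separator.in.input.line", "\t");
+ FileSplit split = new FileSplit(new Path("/data/input.txt"), 0, fileLength,
+ (String[]) null);
+ KeyValueLineRecordReader reader = new KeyValueLineRecordReader(conf, split);
+ Text key = reader.createKey();
+ Text value = reader.createValue();
+ while (reader.next(key, value)) {
+ System.out.println(key + " => " + value);
+ }
+ reader.close();
+ -->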
+ <!-- start class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
+ <class name="KeyValueTextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="KeyValueTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return is used to signal the end of a line. Each line
+ is divided into key and value parts by a separator byte. If no such byte
+ exists, the key will be the entire line and the value will be empty.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
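+ <!-- Usage sketch: wiring KeyValueTextInputFormat into a JobConf; the comma
+ separator shown is an assumption (the default is tab).
+
+ JobConf job = new JobConf();
+ job.setInputFormat(KeyValueTextInputFormat.class);
+ job.set("key.value.separator.in.input.line", ",");
+ -->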
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader -->
+ <class name="LineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="LineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.LongWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.LongWritable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the progress within the split]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Treats keys as offsets in the file and values as lines.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader -->
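+ <!-- Usage sketch for LineRecordReader: keys are byte offsets, values are lines.
+ The conf and split variables are hypothetical, set up as in the
+ KeyValueLineRecordReader sketch above.
+
+ LineRecordReader reader = new LineRecordReader(conf, split);
+ LongWritable offset = reader.createKey();
+ Text line = reader.createValue();
+ while (reader.next(offset, line)) {
+ System.out.println(offset + ": " + line);
+ }
+ reader.close();
+ -->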
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
+ <class name="LineRecordReader.LineReader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LineRecordReader.LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ <code>io.file.buffer.size</code> specified in the given
+ <code>Configuration</code>.
+ @param in input stream
+ @param conf configuration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the underlying stream.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <param name="maxBytesToConsume" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @param maxLineLength the maximum number of bytes to store into str.
+ @param maxBytesToConsume the maximum number of bytes to consume in this call.
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @param maxLineLength the maximum number of bytes to store into str.
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides a line reader from an input stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
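+ <!-- Usage sketch for LineRecordReader.LineReader; readLine returns the number
+ of bytes consumed including the newline, so 0 signals end of stream. The file
+ name is hypothetical.
+
+ InputStream in = new FileInputStream("/tmp/input.txt");
+ LineRecordReader.LineReader lines =
+ new LineRecordReader.LineReader(in, new Configuration());
+ Text line = new Text();
+ while (lines.readLine(line) > 0) {
+ System.out.println(line);
+ }
+ lines.close();
+ -->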
+ <!-- start class org.apache.hadoop.mapred.MapFileOutputFormat -->
+ <class name="MapFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.MapFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getEntry" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readers" type="org.apache.hadoop.io.MapFile.Reader[]"/>
+ <param name="partitioner" type="org.apache.hadoop.mapred.Partitioner&lt;K, V&gt;"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get an entry from output generated by this class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link MapFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapFileOutputFormat -->
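+  <!-- Editor's note: a hedged sketch of reading back MapFileOutputFormat
+  output with the getReaders/getEntry helpers above. HashPartitioner is the
+  usual default, but you must pass whichever Partitioner the writing job used.
+
+  import java.io.IOException;
+  import org.apache.hadoop.fs.FileSystem;
+  import org.apache.hadoop.fs.Path;
+  import org.apache.hadoop.io.MapFile;
+  import org.apache.hadoop.io.Text;
+  import org.apache.hadoop.mapred.JobConf;
+  import org.apache.hadoop.mapred.MapFileOutputFormat;
+  import org.apache.hadoop.mapred.Partitioner;
+  import org.apache.hadoop.mapred.lib.HashPartitioner;
+
+  public class MapFileLookup {
+    public static Text lookup(FileSystem fs, Path outDir, JobConf conf, Text key)
+        throws IOException {
+      MapFile.Reader[] readers = MapFileOutputFormat.getReaders(fs, outDir, conf);
+      Partitioner<Text, Text> partitioner = new HashPartitioner<Text, Text>();
+      Text value = new Text();
+      // Returns the value for the key, or null if the key is absent.
+      return (Text) MapFileOutputFormat.getEntry(readers, partitioner, key, value);
+    }
+  }
+  -->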
+ <!-- start interface org.apache.hadoop.mapred.Mapper -->
+ <interface name="Mapper" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1"/>
+ <param name="value" type="V1"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Maps a single input key/value pair into an intermediate key/value pair.
+
+ <p>Output pairs need not be of the same types as input pairs. A given
+ input pair may map to zero or many output pairs. Output pairs are
+ collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes a significant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has
+ timed-out and kill that task. The other way of avoiding this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the input key.
+ @param value the input value.
+ @param output collects mapped keys and values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Maps input key/value pairs to a set of intermediate key/value pairs.
+
+ <p>Maps are the individual tasks which transform input records into
+ intermediate records. The transformed intermediate records need not be of
+ the same type as the input records. A given input pair may map to zero or
+ many output pairs.</p>
+
+ <p>The Hadoop Map-Reduce framework spawns one map task for each
+ {@link InputSplit} generated by the {@link InputFormat} for the job.
+ <code>Mapper</code> implementations can access the {@link JobConf} for the
+ job via the {@link JobConfigurable#configure(JobConf)} and initialize
+ themselves. Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p>The framework then calls
+ {@link #map(Object, Object, OutputCollector, Reporter)}
+ for each key/value pair in the <code>InputSplit</code> for that task.</p>
+
+ <p>All intermediate values associated with a given output key are
+ subsequently grouped by the framework, and passed to a {@link Reducer} to
+ determine the final output. Users can control the grouping by specifying
+ a <code>Comparator</code> via
+ {@link JobConf#setOutputKeyComparatorClass(Class)}.</p>
+
+ <p>The grouped <code>Mapper</code> outputs are partitioned per
+ <code>Reducer</code>. Users can control which keys (and hence records) go to
+ which <code>Reducer</code> by implementing a custom {@link Partitioner}.
+
+ <p>Users can optionally specify a <code>combiner</code>, via
+ {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the
+ intermediate outputs, which helps to cut down the amount of data transferred
+ from the <code>Mapper</code> to the <code>Reducer</code>.
+
+ <p>The intermediate, grouped outputs are always stored in
+ {@link SequenceFile}s. Applications can specify if and how the intermediate
+ outputs are to be compressed and which {@link CompressionCodec}s are to be
+ used via the <code>JobConf</code>.</p>
+
+ <p>If the job has
+ <a href="{@docRoot}/org/apache/hadoop/mapred/JobConf.html#ReducerNone">zero
+ reduces</a> then the output of the <code>Mapper</code> is directly written
+ to the {@link FileSystem} without grouping by keys.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyMapper&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Mapper&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String mapTaskId;
+ private String inputFile;
+ private int noRecords = 0;
+
+ public void configure(JobConf job) {
+ mapTaskId = job.get("mapred.task.id");
+ inputFile = job.get("mapred.input.file");
+ }
+
+ public void map(K key, V val,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ reporter.progress();
+
+ // Process some more
+ // ...
+ // ...
+
+ // Increment the no. of &lt;key, value&gt; pairs processed
+ ++noRecords;
+
+ // Increment counters
+ reporter.incrCounter(MyCounters.NUM_RECORDS, 1);
+
+ // Every 100 records update application-level status
+ if ((noRecords%100) == 0) {
+ reporter.setStatus(mapTaskId + " processed " + noRecords +
+ " from input-file: " + inputFile);
+ }
+
+ // Output the result
+ output.collect(key, val);
+ }
+ }
+ </pre></blockquote></p>
+
+ <p>Applications may write a custom {@link MapRunnable} to exert greater
+ control on map processing e.g. multi-threaded <code>Mapper</code>s etc.</p>
+
+ @see JobConf
+ @see InputFormat
+ @see Partitioner
+ @see Reducer
+ @see MapReduceBase
+ @see MapRunnable
+ @see SequenceFile]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Mapper -->
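+  <!-- Editor's note: the interface doc above mentions several JobConf knobs
+  (mapper, combiner, output types) without showing the wiring. A hedged driver
+  sketch; class names such as MyDriver/MyMapper/MyReducer are hypothetical.
+
+  JobConf job = new JobConf(MyDriver.class);       // hypothetical driver class
+  job.setJobName("example");
+  job.setMapperClass(MyMapper.class);              // e.g. the MyMapper above
+  job.setCombinerClass(MyReducer.class);           // optional local aggregation
+  job.setReducerClass(MyReducer.class);            // hypothetical reducer
+  job.setOutputKeyClass(Text.class);
+  job.setOutputValueClass(Text.class);
+  FileInputFormat.setInputPaths(job, new Path("in"));
+  FileOutputFormat.setOutputPath(job, new Path("out"));
+  JobClient.runJob(job);                           // submit and wait
+  -->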
+ <!-- start class org.apache.hadoop.mapred.MapReduceBase -->
+ <class name="MapReduceBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="MapReduceBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for {@link Mapper} and {@link Reducer} implementations.
+
+ <p>Provides default no-op implementations for a few methods, most non-trivial
+ applications need to override some of them.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapReduceBase -->
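+  <!-- Editor's note: MapReduceBase exists so that implementations only
+  override what they need. A minimal sketch; the property name is hypothetical.
+
+  public class UpperCaseMapper extends MapReduceBase
+      implements Mapper<LongWritable, Text, LongWritable, Text> {
+    private boolean toUpper;
+
+    public void configure(JobConf job) {            // overrides the no-op
+      toUpper = job.getBoolean("example.uppercase", true);
+    }
+
+    public void map(LongWritable key, Text value,
+                    OutputCollector<LongWritable, Text> output,
+                    Reporter reporter) throws IOException {
+      String s = value.toString();
+      output.collect(key, new Text(toUpper ? s.toUpperCase() : s));
+    }
+    // close() is inherited from MapReduceBase as a no-op.
+  }
+  -->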
+ <!-- start interface org.apache.hadoop.mapred.MapRunnable -->
+ <interface name="MapRunnable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start mapping input <tt>&lt;key, value&gt;</tt> pairs.
+
+ <p>Mapping of input records to output records is complete when this method
+ returns.</p>
+
+ @param input the {@link RecordReader} to read the input records.
+ @param output the {@link OutputCollector} to collect the output records.
+ @param reporter {@link Reporter} to report progress, status-updates etc.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Expert: Generic interface for {@link Mapper}s.
+
+ <p>Custom implementations of <code>MapRunnable</code> can exert greater
+ control on map processing e.g. multi-threaded, asynchronous mappers etc.</p>
+
+ @see Mapper]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.MapRunnable -->
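+  <!-- Editor's note: a hedged sketch of a MapRunnable that reproduces the
+  default single-threaded behaviour, to make the run() contract concrete:
+
+  public class SimpleMapRunner<K1, V1, K2, V2>
+      implements MapRunnable<K1, V1, K2, V2> {
+    private Mapper<K1, V1, K2, V2> mapper;
+
+    @SuppressWarnings("unchecked")
+    public void configure(JobConf job) {
+      mapper = (Mapper<K1, V1, K2, V2>)
+          ReflectionUtils.newInstance(job.getMapperClass(), job);
+    }
+
+    public void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
+                    Reporter reporter) throws IOException {
+      try {
+        K1 key = input.createKey();
+        V1 value = input.createValue();
+        while (input.next(key, value)) {       // false once input is exhausted
+          mapper.map(key, value, output, reporter);
+        }
+      } finally {
+        mapper.close();
+      }
+    }
+  }
+  -->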
+ <!-- start class org.apache.hadoop.mapred.MapRunner -->
+ <class name="MapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMapper" return="org.apache.hadoop.mapred.Mapper&lt;K1, V1, K2, V2&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Default {@link MapRunnable} implementation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapRunner -->
+ <!-- start class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <class name="MultiFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultiFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An abstract {@link InputFormat} that returns {@link MultiFileSplit}'s
+ from its {@link #getSplits(JobConf, int)} method. Splits are constructed from
+ the files under the input paths. Each returned split contains <i>nearly</i>
+ equal content length. <br>
+ Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)}
+ to construct <code>RecordReader</code>'s for <code>MultiFileSplit</code>'s.
+ @see MultiFileSplit]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileInputFormat -->
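+  <!-- Editor's note: a hedged sketch of the subclassing pattern the doc above
+  describes: getSplits is inherited, and the subclass supplies a RecordReader
+  that walks the files of a MultiFileSplit (here, one record per file).
+
+  public class FileNameInputFormat extends MultiFileInputFormat<Text, LongWritable> {
+    public RecordReader<Text, LongWritable> getRecordReader(
+        InputSplit split, JobConf job, Reporter reporter) throws IOException {
+      final MultiFileSplit files = (MultiFileSplit) split;
+      return new RecordReader<Text, LongWritable>() {
+        private int i = 0;
+        public boolean next(Text key, LongWritable value) {
+          if (i >= files.getNumPaths()) return false;
+          key.set(files.getPath(i).toString());  // key: the file path
+          value.set(files.getLength(i));         // value: its length in bytes
+          i++;
+          return true;
+        }
+        public Text createKey() { return new Text(); }
+        public LongWritable createValue() { return new LongWritable(); }
+        public long getPos() { return i; }
+        public float getProgress() {
+          return files.getNumPaths() == 0 ? 1.0f
+              : (float) i / files.getNumPaths();
+        }
+        public void close() { }
+      };
+    }
+  }
+  -->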
+ <!-- start class org.apache.hadoop.mapred.MultiFileSplit -->
+ <class name="MultiFileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="MultiFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLengths" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array containing the lengths of the files in
+ the split]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the length of the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getNumPaths" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all the Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A sub-collection of input files. Unlike {@link FileSplit}, a MultiFileSplit
+ does not represent a split of a single file, but a split of the input files
+ into smaller sets. The atomic unit of splitting is a file. <br>
+ MultiFileSplit can be used to implement {@link RecordReader}'s that read
+ one record per file.
+ @see FileSplit
+ @see MultiFileInputFormat]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileSplit -->
+ <!-- start interface org.apache.hadoop.mapred.OutputCollector -->
+ <interface name="OutputCollector" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="collect"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Adds a key/value pair to the output.
+
+ @param key the key to collect.
+ @param value the value to collect.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Collects the <code>&lt;key, value&gt;</code> pairs output by {@link Mapper}s
+ and {@link Reducer}s.
+
+ <p><code>OutputCollector</code> is the generalization of the facility
+ provided by the Map-Reduce framework to collect data output by either the
+ <code>Mapper</code> or the <code>Reducer</code> i.e. intermediate outputs
+ or the output of the job.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputCollector -->
+ <!-- start interface org.apache.hadoop.mapred.OutputFormat -->
+ <interface name="OutputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordWriter} for the given job.
+
+ @param ignored
+ @param job configuration for the job whose output is being written.
+ @param name the unique name for this part of the output.
+ @param progress mechanism for reporting progress while writing to file.
+ @return a {@link RecordWriter} to write the output for the job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check for validity of the output-specification for the job.
+
+ <p>This is to validate the output specification for the job when the
+ job is submitted. Typically checks that it does not already exist,
+ throwing an exception when it already exists, so that output is not
+ overwritten.</p>
+
+ @param ignored
+ @param job job configuration.
+ @throws IOException when output should not be attempted]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputFormat</code> describes the output-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the output-specification of the job. For example, check that the
+ output directory doesn't already exist.
+ <li>
+ Provide the {@link RecordWriter} implementation to be used to write out
+ the output files of the job. Output files are stored in a
+ {@link FileSystem}.
+ </li>
+ </ol>
+
+ @see RecordWriter
+ @see JobConf]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputFormat -->
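+  <!-- Editor's note: a hedged sketch tying the two OutputFormat methods
+  together: checkOutputSpecs refuses to clobber existing output, and
+  getRecordWriter returns a writer for one task's output file.
+
+  public class TabSeparatedOutputFormat implements OutputFormat<Text, Text> {
+    public RecordWriter<Text, Text> getRecordWriter(FileSystem ignored,
+        JobConf job, String name, Progressable progress) throws IOException {
+      Path file = FileOutputFormat.getTaskOutputPath(job, name);
+      final FSDataOutputStream out = file.getFileSystem(job).create(file, progress);
+      return new RecordWriter<Text, Text>() {
+        public void write(Text key, Text value) throws IOException {
+          out.write(key.getBytes(), 0, key.getLength());
+          out.writeByte('\t');
+          out.write(value.getBytes(), 0, value.getLength());
+          out.writeByte('\n');
+        }
+        public void close(Reporter reporter) throws IOException {
+          out.close();
+        }
+      };
+    }
+
+    public void checkOutputSpecs(FileSystem ignored, JobConf job)
+        throws IOException {
+      Path outDir = FileOutputFormat.getOutputPath(job);
+      if (outDir != null && outDir.getFileSystem(job).exists(outDir)) {
+        throw new IOException("Output directory " + outDir + " already exists");
+      }
+    }
+  }
+  -->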
+ <!-- start class org.apache.hadoop.mapred.OutputFormatBase -->
+ <class name="OutputFormatBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link FileOutputFormat}">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="OutputFormatBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for {@link OutputFormat}.
+ @deprecated Use {@link FileOutputFormat}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputFormatBase -->
+ <!-- start class org.apache.hadoop.mapred.OutputLogFilter -->
+ <class name="OutputLogFilter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.PathFilter"/>
+ <constructor name="OutputLogFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <doc>
+ <![CDATA[This class filters log files from the given directory.
+ It does not accept paths containing _logs.
+ This can be used to list the paths of an output directory as follows:
+ Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
+ new OutputLogFilter()));]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputLogFilter -->
+ <!-- start interface org.apache.hadoop.mapred.Partitioner -->
+ <interface name="Partitioner" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numPartitions" type="int"/>
+ <doc>
+ <![CDATA[Get the partition number for a given key (hence record) given the total
+ number of partitions i.e. number of reduce-tasks for the job.
+
+ <p>Typically a hash function on all or a subset of the key.</p>
+
+ @param key the key to be partitioned.
+ @param value the entry value.
+ @param numPartitions the total number of partitions.
+ @return the partition number for the <code>key</code>.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partitions the key space.
+
+ <p><code>Partitioner</code> controls the partitioning of the keys of the
+ intermediate map-outputs. The key (or a subset of the key) is used to derive
+ the partition, typically by a hash function. The total number of partitions
+ is the same as the number of reduce tasks for the job. Hence this controls
+ which of the <code>m</code> reduce tasks the intermediate key (and hence the
+ record) is sent for reduction.</p>
+
+ @see Reducer]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Partitioner -->
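+  <!-- Editor's note: a small sketch of the "hash a subset of the key" idea in
+  the doc above; the key layout (a ':'-separated prefix) is hypothetical.
+
+  public class PrefixPartitioner implements Partitioner<Text, Text> {
+    public void configure(JobConf job) { }           // nothing to configure
+
+    public int getPartition(Text key, Text value, int numPartitions) {
+      // Hash only the part of the key before the first ':' so that all
+      // records sharing that prefix reach the same reduce task.
+      String prefix = key.toString().split(":", 2)[0];
+      return (prefix.hashCode() & Integer.MAX_VALUE) % numPartitions;
+    }
+  }
+  -->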
+ <!-- start interface org.apache.hadoop.mapred.RecordReader -->
+ <interface name="RecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the next key/value pair from the input for processing.
+
+ @param key the key to read data into
+ @param value the value to read data into
+ @return true iff a key/value was read, false if at EOF]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a key.
+
+ @return a new key object.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a value.
+
+ @return a new value object.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current position in the input.
+
+ @return the current position in the input.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this {@link RecordReader} to future operations.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[How much of the input has the {@link RecordReader} consumed, i.e.
+ how much of the input has been processed?
+
+ @return progress from <code>0.0</code> to <code>1.0</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordReader</code> reads &lt;key, value&gt; pairs from an
+ {@link InputSplit}.
+
+ <p><code>RecordReader</code> typically converts the byte-oriented view of
+ the input, provided by the <code>InputSplit</code>, into a
+ record-oriented view for the {@link Mapper} & {@link Reducer} tasks to
+ process. It thus assumes the responsibility of processing record
+ boundaries and presenting the tasks with keys and values.</p>
+
+ @see InputSplit
+ @see InputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordReader -->
+ <!-- start interface org.apache.hadoop.mapred.RecordWriter -->
+ <interface name="RecordWriter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes a key/value pair.
+
+ @param key the key to write.
+ @param value the value to write.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this <code>RecordWriter</code> to future operations.
+
+ @param reporter facility to report progress.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordWriter</code> writes the output &lt;key, value&gt; pairs
+ to an output file.
+
+ <p><code>RecordWriter</code> implementations write the job outputs to the
+ {@link FileSystem}.
+
+ @see OutputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordWriter -->
+ <!-- start interface org.apache.hadoop.mapred.Reducer -->
+ <interface name="Reducer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="values" type="java.util.Iterator&lt;V2&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K3, V3&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<i>Reduces</i> values for a given key.
+
+ <p>The framework calls this method for each
+ <code>&lt;key, (list of values)></code> pair in the grouped inputs.
+ Output values must be of the same type as input values. Input keys must
+ not be altered. The framework will <b>reuse</b> the key and value objects
+ that are passed into the reduce, therefore the application should clone
+ the objects they want to keep a copy of. In many cases, all values are
+ combined into zero or one value.
+ </p>
+
+ <p>Output pairs are collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes a significant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has
+ timed-out and kill that task. The other way of avoiding this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the key.
+ @param values the list of values to reduce.
+ @param output to collect keys and combined values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reduces a set of intermediate values which share a key to a smaller set of
+ values.
+
+ <p>The number of <code>Reducer</code>s for the job is set by the user via
+ {@link JobConf#setNumReduceTasks(int)}. <code>Reducer</code> implementations
+ can access the {@link JobConf} for the job via the
+ {@link JobConfigurable#configure(JobConf)} method and initialize themselves.
+ Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p><code>Reducer</code> has 3 primary phases:</p>
+ <ol>
+ <li>
+
+ <h4 id="Shuffle">Shuffle</h4>
+
+ <p><code>Reducer</code> is input the grouped output of a {@link Mapper}.
+ In this phase the framework, for each <code>Reducer</code>, fetches the
+ relevant partition of the output of all the <code>Mapper</code>s, via HTTP.
+ </p>
+ </li>
+
+ <li>
+ <h4 id="Sort">Sort</h4>
+
+ <p>The framework groups <code>Reducer</code> inputs by <code>key</code>s
+ (since different <code>Mapper</code>s may have output the same key) in this
+ stage.</p>
+
+ <p>The shuffle and sort phases occur simultaneously i.e. while outputs are
+ being fetched they are merged.</p>
+
+ <h5 id="SecondarySort">SecondarySort</h5>
+
+ <p>If equivalence rules for keys while grouping the intermediates are
+ different from those for grouping keys before reduction, then one may
+ specify a <code>Comparator</code> via
+ {@link JobConf#setOutputValueGroupingComparator(Class)}. Since
+ {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to
+ control how intermediate keys are grouped, these can be used in conjunction
+ to simulate <i>secondary sort on values</i>.</p>
+
+
+ For example, say that you want to find duplicate web pages and tag them
+ all with the url of the "best" known example. You would set up the job
+ like:
+ <ul>
+ <li>Map Input Key: url</li>
+ <li>Map Input Value: document</li>
+ <li>Map Output Key: document checksum, url pagerank</li>
+ <li>Map Output Value: url</li>
+ <li>Partitioner: by checksum</li>
+ <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
+ <li>OutputValueGroupingComparator: by checksum</li>
+ </ul>
+ </li>
+
+ <li>
+ <h4 id="Reduce">Reduce</h4>
+
+ <p>In this phase the
+ {@link #reduce(Object, Iterator, OutputCollector, Reporter)}
+ method is called for each <code>&lt;key, (list of values)></code> pair in
+ the grouped inputs.</p>
+ <p>The output of the reduce task is typically written to the
+ {@link FileSystem} via
+ {@link OutputCollector#collect(Object, Object)}.</p>
+ </li>
+ </ol>
+
+ <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyReducer&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Reducer&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String reduceTaskId;
+ private int noKeys = 0;
+
+ public void configure(JobConf job) {
+ reduceTaskId = job.get("mapred.task.id");
+ }
+
+ public void reduce(K key, Iterator&lt;V&gt; values,
+ OutputCollector&lt;K, V&gt; output,
+ Reporter reporter)
+ throws IOException {
+
+ // Process
+ int noValues = 0;
+ while (values.hasNext()) {
+ V value = values.next();
+
+ // Increment the no. of values for this key
+ ++noValues;
+
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ if ((noValues%10) == 0) {
+ reporter.progress();
+ }
+
+ // Process some more
+ // ...
+ // ...
+
+ // Output the &lt;key, value&gt;
+ output.collect(key, value);
+ }
+
+ // Increment the no. of &lt;key, list of values&gt; pairs processed
+ ++noKeys;
+
+ // Increment counters
+ reporter.incrCounter(MyCounters.NUM_RECORDS, 1);
+
+ // Every 100 keys update application-level status
+ if ((noKeys%100) == 0) {
+ reporter.setStatus(reduceTaskId + " processed " + noKeys);
+ }
+ }
+ }
+ </pre></blockquote></p>
+
+ @see Mapper
+ @see Partitioner
+ @see Reporter
+ @see MapReduceBase]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reducer -->
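+  <!-- Editor's note: the SecondarySort discussion above lists the pieces of
+  the duplicate-web-page job without the wiring. A hedged sketch; every class
+  name below (ChecksumAndRank, ChecksumPartitioner, etc.) is hypothetical.
+
+  JobConf job = new JobConf(DedupDriver.class);
+  job.setMapOutputKeyClass(ChecksumAndRank.class);    // checksum + pagerank
+  job.setMapOutputValueClass(Text.class);             // the url
+  job.setPartitionerClass(ChecksumPartitioner.class);               // by checksum
+  job.setOutputKeyComparatorClass(ChecksumThenRankComparator.class); // checksum, then decreasing pagerank
+  job.setOutputValueGroupingComparator(ChecksumComparator.class);   // group by checksum only
+  -->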
+ <!-- start interface org.apache.hadoop.mapred.Reporter -->
+ <interface name="Reporter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Progressable"/>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the status description for the task.
+
+ @param status brief description of the current status.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the key, which can be of
+ any {@link Enum} type, by the specified amount.
+
+ @param key key to identify the counter to be incremented. The key can be
+ any <code>Enum</code>.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the group and counter name
+ by the specified amount.
+
+ @param group name to identify the group of the counter to be incremented.
+ @param counter name to identify the counter within the group.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="getInputSplit" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+ <doc>
+ <![CDATA[Get the {@link InputSplit} object for a map.
+
+ @return the <code>InputSplit</code> that the map is reading from.
+ @throws UnsupportedOperationException if called outside a mapper]]>
+ </doc>
+ </method>
+ <field name="NULL" type="org.apache.hadoop.mapred.Reporter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A constant of Reporter type that does nothing.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A facility for Map-Reduce applications to report progress and update
+ counters, status information etc.
+
+ <p>{@link Mapper} and {@link Reducer} can use the <code>Reporter</code>
+ provided to report progress or just indicate that they are alive. In
+ scenarios where the application takes a significant amount of time to
+ process individual key/value pairs, this is crucial since the framework
+ might assume that the task has timed-out and kill that task.
+
+ <p>Applications can also update {@link Counters} via the provided
+ <code>Reporter</code>.</p>
+
+ @see Progressable
+ @see Counters]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reporter -->
+ <!-- start interface org.apache.hadoop.mapred.RunningJob -->
+ <interface name="RunningJob" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job identifier.
+
+ @return the job identifier.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.">
+ <doc>
+ <![CDATA[@deprecated This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the job.
+
+ @return the name of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the path of the submitted job configuration.
+
+ @return the path of the submitted job configuration.]]>
+ </doc>
+ </method>
+ <method name="getTrackingURL" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the URL where some job progress information will be displayed.
+
+ @return the URL where some job progress information will be displayed.]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's map-tasks, as a float between 0.0
+ and 1.0. When all map tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's map-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0
+ and 1.0. When all reduce tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's reduce-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isComplete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job is finished or not.
+ This is a non-blocking call.
+
+ @return <code>true</code> if the job is complete, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isSuccessful" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job completed successfully.
+
+ @return <code>true</code> if the job succeeded, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="waitForCompletion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Blocks until the job is complete.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill the running job. Blocks until all job tasks have been
+ killed as well. If the job is no longer running, it simply returns.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="startFrom" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get events indicating completion (success/failure) of component tasks.
+
+ @param startFrom index to start fetching events from
+ @return an array of {@link TaskCompletionEvent}s
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill indicated task attempt.
+
+ @param taskId the id of the task to be terminated.
+ @param shouldFail if true the task is failed and added to the failed tasks
+ list, otherwise it is just killed, without affecting the
+ job's failure status.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #killTask(TaskAttemptID, boolean)}">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the counters for this job.
+
+ @return the counters for this job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RunningJob</code> is the user-interface to query for details on a
+ running Map-Reduce job.
+
+ <p>Clients can get hold of <code>RunningJob</code> via the {@link JobClient}
+ and then query the running-job for details such as name, configuration,
+ progress etc.</p>
+
+ @see JobClient]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RunningJob -->
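+  <!-- Editor's note: a hedged sketch of polling a RunningJob obtained from
+  JobClient, as the doc above describes:
+
+  public static void runAndWatch(JobConf conf)
+      throws IOException, InterruptedException {
+    JobClient client = new JobClient(conf);
+    RunningJob job = client.submitJob(conf);
+    while (!job.isComplete()) {                 // non-blocking status check
+      System.out.printf("map %.0f%% reduce %.0f%%%n",
+          job.mapProgress() * 100, job.reduceProgress() * 100);
+      Thread.sleep(5000);
+    }
+    if (!job.isSuccessful()) {
+      throw new IOException("Job " + job.getID() + " failed");
+    }
+  }
+  -->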
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <class name="SequenceFileAsBinaryInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat that reads keys and values from SequenceFiles in
+ binary (raw) format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <class name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"/>
+ <constructor name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the key class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the value class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.BytesWritable"/>
+ <param name="val" type="org.apache.hadoop.io.BytesWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read raw bytes from a SequenceFile.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split.
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Read records from a SequenceFile as binary (raw) bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
+ <class name="SequenceFileAsBinaryOutputFormat" extends="org.apache.hadoop.mapred.SequenceFileOutputFormat&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setSequenceFileOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the {@link SequenceFile}
+ <p>This allows the user to specify the key class to be different
+ from the actual class ({@link BytesWritable}) used for writing </p>
+
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output key class.]]>
+ </doc>
+ </method>
+ <method name="setSequenceFileOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for the {@link SequenceFile}
+ <p>This allows the user to specify the value class to be different
+ from the actual class ({@link BytesWritable}) used for writing </p>
+
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output value class.]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the key class for the {@link SequenceFile}
+
+ @return the key class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the value class for the {@link SequenceFile}
+
+ @return the value class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes keys and values to
+ {@link SequenceFile}s in binary (raw) format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
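+  <!-- Editor's note: a short configuration sketch for the static setters
+  above, assuming the usual pattern: the map/reduce tasks emit raw
+  BytesWritable pairs, while the SequenceFile header records the logical
+  key/value classes.
+
+  JobConf job = new JobConf();
+  job.setOutputFormat(SequenceFileAsBinaryOutputFormat.class);
+  job.setOutputKeyClass(BytesWritable.class);      // what tasks actually emit
+  job.setOutputValueClass(BytesWritable.class);
+  SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, Text.class);
+  SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, IntWritable.class);
+  -->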
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <class name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" type="org.apache.hadoop.io.BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.BytesWritable"/>
+ </method>
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Inner class used for appendRaw]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+ <class name="SequenceFileAsTextInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is similar to SequenceFileInputFormat, except that it generates
+ SequenceFileAsTextRecordReader, which converts the input keys and values to their
+ String forms by calling the toString() method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
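+ <!-- Usage sketch for SequenceFileAsTextInputFormat; the input path is an
+      illustrative assumption. Both keys and values then arrive in the mapper
+      as Text.
+
+        JobConf conf = new JobConf();
+        conf.setInputFormat(SequenceFileAsTextInputFormat.class);
+        FileInputFormat.setInputPaths(conf, new Path("/data/seqfiles"));
+ -->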
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <class name="SequenceFileAsTextRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="SequenceFileAsTextRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the key/value pair in a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class converts the input keys and values to their String forms by calling
+ the toString() method. This class is to SequenceFileAsTextInputFormat what
+ LineRecordReader is to TextInputFormat.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+ <class name="SequenceFileInputFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a record reader for the given split.
+ @param split file split
+ @param job job configuration
+ @param reporter reporter that sends reports to the task tracker
+ @return RecordReader]]>
+ </doc>
+ </method>
+ <method name="setFilterClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="filterClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the filter class.
+
+ @param conf application configuration
+ @param filterClass filter class]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that allows a map/red job to work on a sample of sequence files.
+ The sample is decided by the filter class set by the job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+ <!-- start interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <interface name="SequenceFileInputFilter.Filter" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filter function: decide whether a record should be accepted.
+ @param key record key
+ @return true if the record is accepted; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Filter interface.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <class name="SequenceFileInputFilter.FilterBase" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.SequenceFileInputFilter.Filter"/>
+ <constructor name="SequenceFileInputFilter.FilterBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Base class for Filters.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
+ <class name="SequenceFileInputFilter.MD5Filter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.MD5Filter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+ <![CDATA[Set the filtering frequency in the configuration.
+
+ @param conf configuration
+ @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Configure the filter according to the configuration.
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filtering method: if MD5(key) % frequency == 0, return true;
+ otherwise return false.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class returns a set of records by examining the MD5 digest of each
+ key against a filtering frequency <i>f</i>. The filtering criterion is
+ MD5(key) % f == 0.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <class name="SequenceFileInputFilter.PercentFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.PercentFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+ <![CDATA[Set the frequency and store it in conf.
+ @param conf configuration
+ @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Configure the filter by checking the configuration.
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filtering method: if record# % frequency == 0, return true;
+ otherwise return false.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class returns a percentage of records.
+ The percentage is determined by a filtering frequency <i>f</i> using
+ the criterion record# % f == 0.
+ For example, if the frequency is 10, one out of every 10 records is returned.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
+ <class name="SequenceFileInputFilter.RegexFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.RegexFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPattern"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="regex" type="java.lang.String"/>
+ <exception name="PatternSyntaxException" type="java.util.regex.PatternSyntaxException"/>
+ <doc>
+ <![CDATA[Define the filtering regex and store it in conf.
+ @param conf where the regex is set
+ @param regex regex used as a filter]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Configure the filter by checking the configuration.]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filtering method: if the key matches the regex, return true;
+ otherwise return false.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Filters records by matching the key against a regex.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
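+ <!-- Usage sketch for the filter family above; the frequency value and the
+      regex are illustrative assumptions. This samples roughly one of every
+      ten records, with the regex and MD5 variants noted as alternatives.
+
+        JobConf conf = new JobConf();
+        conf.setInputFormat(SequenceFileInputFilter.class);
+        SequenceFileInputFilter.setFilterClass(conf,
+            SequenceFileInputFilter.PercentFilter.class);
+        SequenceFileInputFilter.PercentFilter.setFrequency(conf, 10);
+        // Alternatives:
+        //   SequenceFileInputFilter.RegexFilter.setPattern(conf, "^user_.*");
+        //   SequenceFileInputFilter.MD5Filter.setFrequency(conf, 10);
+ -->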
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="listPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
+ <class name="SequenceFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.SequenceFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf}
+ @return the {@link CompressionType} for the output {@link SequenceFile},
+ defaulting to {@link CompressionType#RECORD}]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf} to modify
+ @param style the {@link CompressionType} for the output
+ {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
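+ <!-- Usage sketch for SequenceFileOutputFormat with block compression; the
+      output path is an illustrative assumption.
+
+        JobConf conf = new JobConf();
+        conf.setOutputFormat(SequenceFileOutputFormat.class);
+        FileOutputFormat.setOutputPath(conf, new Path("/data/out"));
+        SequenceFileOutputFormat.setOutputCompressionType(conf,
+            SequenceFile.CompressionType.BLOCK);
+ -->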
+ <!-- start class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+ <class name="SequenceFileRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <constructor name="SequenceFileRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of key that must be passed to {@link
+ #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of value that must be passed to {@link
+ #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="conf" type="org.apache.hadoop.conf.Configuration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An {@link RecordReader} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileRecordReader -->
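+ <!-- Read-loop sketch for SequenceFileRecordReader, assuming "conf" and the
+      FileSplit "split" are supplied by the caller and that the Text/IntWritable
+      types match the file being read.
+
+        SequenceFileRecordReader<Text, IntWritable> reader =
+            new SequenceFileRecordReader<Text, IntWritable>(conf, split);
+        Text key = reader.createKey();
+        IntWritable value = reader.createValue();
+        while (reader.next(key, value)) {
+          // process (key, value); getProgress() reports 0.0 to 1.0
+        }
+        reader.close();
+ -->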
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer -->
+ <class name="StatusHttpServer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer" type="java.lang.String, java.lang.String, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a status server on the given port.
+ The jsp scripts are taken from src/webapps/<name>.
+ @param name The name of the server
+ @param port The port to use on the server
+ @param findPort whether the server should start at the given port and
+ increment by 1 until it finds a free port.]]>
+ </doc>
+ </constructor>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Set a value in the webapp context. These values are available to the jsp
+ pages as "application.getAttribute(name)".
+ @param name The name of the attribute
+ @param value The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="addServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="servletClass" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[Add a servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param servletClass The servlet class]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value in the webapp context.
+ @param name The name of the attribute
+ @return The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the port that the server is on
+ @return the port]]>
+ </doc>
+ </method>
+ <method name="setThreads"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="min" type="int"/>
+ <param name="max" type="int"/>
+ </method>
+ <method name="addSslListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="keystore" type="java.lang.String"/>
+ <param name="storPass" type="java.lang.String"/>
+ <param name="keyPass" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Configure an ssl listener on the server.
+ @param addr address to listen on
+ @param keystore location of the keystore
+ @param storPass password for the keystore
+ @param keyPass password for the key]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start the server. Does not wait for the server to start.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Stop the server.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Create a Jetty embedded server to answer http requests. The primary goal
+ is to serve up status information for the server.
+ There are three contexts:
+ "/logs/" -> points to the log directory
+ "/static/" -> points to common static files (src/webapps/static)
+ "/" -> the jsp server code from (src/webapps/<name>)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer -->
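+ <!-- Startup sketch for StatusHttpServer. The meaning of the second String
+      constructor argument (taken here to be a bind address) and the port
+      number are assumptions inferred from the parameter types
+      (String, String, int, boolean), not confirmed by this file.
+
+        StatusHttpServer server = new StatusHttpServer("job", "0.0.0.0", 50030, true);
+        server.addServlet("stacks", "/stacks", StatusHttpServer.StackServlet.class);
+        server.start();
+        int port = server.getPort();  // may differ from 50030 when findPort is true
+ -->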
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer.StackServlet -->
+ <class name="StatusHttpServer.StackServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer.StackServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A very simple servlet to serve up a text representation of the current
+ stack traces. It both returns the stacks to the caller and logs them.
+ Currently the stack traces are captured sequentially, so the returned and
+ logged outputs may not reflect exactly the same data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer.StackServlet -->
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <class name="StatusHttpServer.TaskGraphServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer.TaskGraphServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="width" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[width of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="height" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[height of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="ymargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on y axis]]>
+ </doc>
+ </field>
+ <field name="xmargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on x axis]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The servlet that outputs SVG graphics for map/reduce task
+ statuses.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskAttemptID -->
+ <class name="TaskAttemptID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskAttemptID" type="org.apache.hadoop.mapred.TaskID, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from given {@link TaskID}.
+ @param taskId TaskID that this task belongs to
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskAttemptID" type="java.lang.String, int, boolean, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param taskId taskId number
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link TaskID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskAttemptID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare TaskIds first by tipIds, then by task numbers.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a TaskAttemptID object from a given string.
+ @return constructed TaskAttemptID object, or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <param name="attemptId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task attempt IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>all task attempt IDs</i>
+ of <i>any jobtracker</i>, in <i>any job</i>, of the <i>first
+ map task</i>, we would use:
+ <pre>
+ TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+ </pre>
+ which will return:
+ <pre> "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @param attemptId the task attempt number, or null
+ @return a regex pattern matching TaskAttemptIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[TaskAttemptID represents the immutable and unique identifier for
+ a task attempt. Each task attempt is one particular instance of a Map or
+ Reduce Task identified by its TaskID.
+
+ TaskAttemptID consists of 2 parts. The first part is the
+ {@link TaskID} that this TaskAttemptID belongs to.
+ The second part is the task attempt number. <br>
+ An example TaskAttemptID is
+ <code>attempt_200707121733_0003_m_000005_0</code>, which represents the
+ zeroth task attempt for the fifth map task in the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskAttemptID strings,
+ but should rather use the appropriate constructors or the
+ {@link #forName(String)} method.
+
+ @see JobID
+ @see TaskID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskAttemptID -->
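+ <!-- Parsing/matching sketch for TaskAttemptID, reusing the example id from
+      the description above; the expected "matches" outcome is explained in
+      the trailing comment.
+
+        TaskAttemptID attempt =
+            TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
+        TaskID tip = attempt.getTaskID();          // the owning task
+        boolean map = attempt.isMap();             // true for an "_m_" id
+        String pattern =
+            TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+        boolean matches = attempt.toString().matches(pattern);
+        // false here: the pattern pins the task number to 000001, not 000005.
+ -->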
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent -->
+ <class name="TaskCompletionEvent" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskCompletionEvent"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor for Writable.]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskCompletionEvent" type="int, java.lang.String, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TaskCompletionEvent" type="int, org.apache.hadoop.mapred.TaskAttemptID, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor. eventId should be created externally and incremented
+ per event for each job.
+ @param eventId event id; event ids should be unique and assigned
+ incrementally, starting from 0.
+ @param taskId task id
+ @param status task's status
+ @param taskTrackerHttp task tracker's host:port for http.]]>
+ </doc>
+ </constructor>
+ <method name="getEventId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns event Id.
+ @return event id]]>
+ </doc>
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskAttemptId()} instead.">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id
+ @deprecated use {@link #getTaskAttemptId()} instead.]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptId" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id]]>
+ </doc>
+ </method>
+ <method name="getTaskStatus" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns enum Status.SUCCESS or Status.FAILURE.
+ @return task tracker status]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerHttp" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[http location of the tasktracker where this task ran.
+ @return http location of tasktracker user logs]]>
+ </doc>
+ </method>
+ <method name="getTaskRunTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns time (in millisec) the task took to complete.]]>
+ </doc>
+ </method>
+ <method name="setTaskRunTime"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskCompletionTime" type="int"/>
+ <doc>
+ <![CDATA[Set the task completion time
+ @param taskCompletionTime time (in millisec) the task took to complete]]>
+ </doc>
+ </method>
+ <method name="setEventId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="eventId" type="int"/>
+ <doc>
+ <![CDATA[Set event id. Event ids should be assigned incrementally, starting from 0.
+ @param eventId]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setTaskID(TaskAttemptID)} instead.">
+ <param name="taskId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId
+ @deprecated use {@link #setTaskID(TaskAttemptID)} instead.]]>
+ </doc>
+ </method>
+ <method name="setTaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId]]>
+ </doc>
+ </method>
+ <method name="setTaskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"/>
+ <doc>
+ <![CDATA[Set task status.
+ @param status]]>
+ </doc>
+ </method>
+ <method name="setTaskTrackerHttp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTrackerHttp" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set task tracker http location.
+ @param taskTrackerHttp]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isMapTask" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="idWithinJob" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="EMPTY_ARRAY" type="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is used to track task completion events on
+ the job tracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent -->
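+ <!-- Polling sketch for TaskCompletionEvent, assuming "running" is a
+      RunningJob obtained from JobClient.submitJob(conf); that client call is
+      an assumption from the wider mapred API, not described in this file.
+
+        int from = 0;
+        TaskCompletionEvent[] events = running.getTaskCompletionEvents(from);
+        while (events.length > 0) {
+          for (TaskCompletionEvent e : events) {
+            System.out.println(e.getTaskAttemptId() + " " + e.getTaskStatus());
+          }
+          from += events.length;
+          events = running.getTaskCompletionEvents(from);
+        }
+ -->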
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <class name="TaskCompletionEvent.Status" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskCompletionEvent.Status&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <!-- start class org.apache.hadoop.mapred.TaskID -->
+ <class name="TaskID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskID" type="org.apache.hadoop.mapred.JobID, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from given {@link JobID}.
+ @param jobId JobID that this tip belongs to
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskID" type="java.lang.String, int, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this tip belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare TaskInProgressIds first by jobIds, then by tip numbers. Reduces are
+ defined as greater than maps.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a TaskID object from a given string.
+ @return constructed TaskID object, or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getTaskIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>the first map task</i>
+ of <i>any jobtracker</i>, of <i>any job</i>, we would use:
+ <pre>
+ TaskID.getTaskIDsPattern(null, null, true, 1);
+ </pre>
+ which will return:
+ <pre> "task_[^_]*_[0-9]*_m_000001*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @return a regex pattern matching TaskIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[TaskID represents the immutable and unique identifier for
+ a Map or Reduce Task. Each TaskID encompasses multiple attempts made to
+ execute the Map or Reduce Task, each of which is uniquely identified by
+ its TaskAttemptID.
+
+ TaskID consists of 3 parts. The first part is the {@link JobID} that this
+ TaskInProgress belongs to. The second part of the TaskID is either 'm' or 'r',
+ representing whether the task is a map task or a reduce task.
+ The third part is the task number. <br>
+ An example TaskID is
+ <code>task_200707121733_0003_m_000005</code>, which represents the
+ fifth map task in the third job running at the jobtracker
+ started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskID strings,
+ but should rather use the appropriate constructors or the
+ {@link #forName(String)} method.
+
+ @see JobID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskID -->
+ <!-- start class org.apache.hadoop.mapred.TaskLog -->
+ <class name="TaskLog" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLog"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskLogFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="getTaskLogFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logsRetainHours" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Purge old user logs.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskLogLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the desired maximum length of task's logs.
+ @param conf the job to look in
+ @return the number of bytes to cap the log files at]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ If the tailLength is 0, the entire output will be saved.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="setup" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ Setup commands, such as setting a memory limit, can be passed; they
+ will be executed before the exec.
+ If the tailLength is 0, the entire output will be saved.
+ @param setup The setup commands for the execed process.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="addCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="isExecutable" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add quotes to each of the command strings and
+ return them as a single string.
+ @param cmd The command to be quoted
+ @param isExecutable makes a shell path if the first
+ argument is executable
+ @return the quoted string.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="captureDebugOut" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="debugoutFilename" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture the debug script's
+ stdout and stderr to debugout.
+ @param cmd The command and the arguments that should be run
+ @param debugoutFilename The filename that stdout and stderr
+ should be saved to.
+ @return the modified command that should be run
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple logger to handle the task-specific user logs.
+ This class uses the system property <code>hadoop.log.dir</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog -->
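+ <!-- Wrapping sketch for TaskLog.captureOutAndError, assuming "attemptId" is
+      a TaskAttemptID for the child task; the command list and the
+      STDOUT/STDERR constant names are illustrative assumptions.
+
+        List<String> cmd = Arrays.asList("java", "-Xmx200m", "ChildMain");
+        File stdout = TaskLog.getTaskLogFile(attemptId, TaskLog.LogName.STDOUT);
+        File stderr = TaskLog.getTaskLogFile(attemptId, TaskLog.LogName.STDERR);
+        List<String> wrapped = TaskLog.captureOutAndError(cmd, stdout, stderr, 4096);
+        // "wrapped" is the command to actually exec; a tailLength of 0 would
+        // keep the entire output instead of the last 4096 bytes.
+ -->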
+ <!-- start class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <class name="TaskLog.LogName" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskLog.LogName&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskLog.LogName[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskLog.LogName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The filter for userlogs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <!-- start class org.apache.hadoop.mapred.TaskLogAppender -->
+ <class name="TaskLogAppender" extends="org.apache.log4j.FileAppender"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogAppender"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="activateOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Getter/Setter methods for log4j.]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ </method>
+ <method name="getTotalLogFileSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setTotalLogFileSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logSize" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[A simple log4j-appender for the task child's
+ map-reduce system logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogAppender -->
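+ <!--
+ A sketch of configuring the appender programmatically through the
+ setters documented above, assuming hadoop.log.dir is set as the
+ TaskLog doc notes; the task id, size cap, and layout are illustrative.
+
+ import org.apache.hadoop.mapred.TaskLogAppender;
+ import org.apache.log4j.Logger;
+ import org.apache.log4j.PatternLayout;
+
+ public class AppenderSketch {
+   public static void main(String[] args) {
+     TaskLogAppender appender = new TaskLogAppender();
+     appender.setTaskId("attempt_200904211745_0001_m_000000_0"); // hypothetical attempt id
+     appender.setTotalLogFileSize(4096L);       // illustrative cap on the log size
+     appender.setLayout(new PatternLayout("%d %p %c: %m%n"));
+     appender.activateOptions();                // log4j hook listed above
+     Logger.getRootLogger().addAppender(appender);
+   }
+ }
+ -->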
+ <!-- start class org.apache.hadoop.mapred.TaskLogServlet -->
+ <class name="TaskLogServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the logs via http.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A servlet that is run by the TaskTrackers to provide the task logs via http.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskReport -->
+ <class name="TaskReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskID()} instead">
+ <doc>
+ <![CDATA[@deprecated use {@link #getTaskID()} instead]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The id of the task.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The amount completed, between zero and one.]]>
+ </doc>
+ </method>
+ <method name="getState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The most recent state, reported by a {@link Reporter}.]]>
+ </doc>
+ </method>
+ <method name="getDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A list of error messages.]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A table of counters.]]>
+ </doc>
+ </method>
+ <method name="getFinishTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get finish time of task.
+ @return 0, if finish time was not set else returns finish time.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get start time of task.
+ @return 0 if start time was not set, else start time.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A report on the state of a task.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskReport -->
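+ <!--
+ A sketch that prints the fields a TaskReport exposes through the
+ getters documented above. How the reports are obtained is left to the
+ caller; only the printing side is shown here.
+
+ import org.apache.hadoop.mapred.TaskReport;
+
+ public class ReportSketch {
+   static void print(TaskReport[] reports) {
+     for (TaskReport r : reports) {
+       // getProgress() is between zero and one, per the doc above
+       System.out.printf("%s %s %.0f%%%n",
+           r.getTaskID(), r.getState(), r.getProgress() * 100);
+       for (String diag : r.getDiagnostics()) {
+         System.out.println("  diag: " + diag);
+       }
+       System.out.println("  counters: " + r.getCounters());
+     }
+   }
+ }
+ -->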
+ <!-- start class org.apache.hadoop.mapred.TaskTracker -->
+ <class name="TaskTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.TaskUmbilicalProtocol"/>
+ <implements name="java.lang.Runnable"/>
+ <constructor name="TaskTracker" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start with the local machine name, and the default JobTracker]]>
+ </doc>
+ </constructor>
+ <method name="getTaskTrackerMetrics" return="org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cleanupStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Removes all contents of temporary storage. Called upon
+ startup, to remove any leftovers from a previous run.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close down the TaskTracker and all its components. We must also shut down
+ any running tasks or threads, and clean up disk space. A new TaskTracker
+ within the same process space might be restarted, so everything must be
+ clean.]]>
+ </doc>
+ </method>
+ <method name="getJobClient" return="org.apache.hadoop.mapred.InterTrackerProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The connection to the JobTracker, used by the TaskRunner
+ for locating remote files.]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerReportAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the port to which the tasktracker is bound]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The server retry loop.
+ This while-loop attempts to connect to the JobTracker. It only
+ loops when the old TaskTracker has gone bad (its state is
+ stale somehow) and we need to reinitialize everything.]]>
+ </doc>
+ </method>
+ <method name="getTask" return="org.apache.hadoop.mapred.Task"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTask" return="org.apache.hadoop.mapred.Task"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called upon startup by the child process, to fetch Task data.]]>
+ </doc>
+ </method>
+ <method name="statusUpdate" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="status" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="statusUpdate" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskStatus" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called periodically to report Task progress, from 0.0 to 1.0.]]>
+ </doc>
+ </method>
+ <method name="reportDiagnosticInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="info" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportDiagnosticInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="info" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when the task dies before completion, and we want to report back
+ diagnostic info]]>
+ </doc>
+ </method>
+ <method name="ping" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ping" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Child checking to see if we're alive. Normally does nothing.]]>
+ </doc>
+ </method>
+ <method name="done"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="shouldPromote" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="done"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldPromote" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The task is done.]]>
+ </doc>
+ </method>
+ <method name="shuffleError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="shuffleError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A reduce-task failed to shuffle the map-outputs. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="fsError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="fsError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A child task had a local filesystem error. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="getMapCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="java.lang.String"/>
+ <param name="fromid" type="int"/>
+ <param name="maxlocs" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMapCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxLocs" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mapOutputLost"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mapOutputLost"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="errorMsg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A completed map task's output has been lost.]]>
+ </doc>
+ </method>
+ <method name="isIdle" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this task tracker idle?
+ @return has this task tracker finished and cleaned up all of its tasks?]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Start the TaskTracker, pointing toward the indicated JobTracker]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[TaskTracker is a process that starts and tracks MR Tasks
+ in a networked environment. It contacts the JobTracker
+ for Task assignments and reporting results.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker -->
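+ <!--
+ A sketch of embedding a TaskTracker using the JobConf constructor and
+ the Runnable contract shown above. Passing a bare JobConf is an
+ assumption; a real deployment would configure the JobTracker address.
+
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.TaskTracker;
+
+ public class TrackerSketch {
+   public static void main(String[] args) throws IOException {
+     TaskTracker tracker = new TaskTracker(new JobConf());
+     // run() is the server retry loop described above
+     new Thread(tracker, "TaskTracker").start();
+   }
+ }
+ -->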
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.Child -->
+ <class name="TaskTracker.Child" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskTracker.Child"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ </method>
+ <doc>
+ <![CDATA[The main() for child processes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.Child -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <class name="TaskTracker.MapOutputServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskTracker.MapOutputServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in TaskTracker's Jetty to serve the map outputs
+ to other nodes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics -->
+ <class name="TaskTracker.TaskTrackerMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Since this object is a registered updater, this method will be called
+ periodically, e.g. every 5 seconds.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics -->
+ <!-- start class org.apache.hadoop.mapred.TextInputFormat -->
+ <class name="TextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="TextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return is used to signal end of line. Keys are
+ the position in the file, and values are the line of text.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat -->
+ <class name="TextOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes plain text files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat -->
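+ <!--
+ A sketch wiring TextInputFormat and TextOutputFormat (both documented
+ above) into a JobConf; the static path helpers on FileInputFormat and
+ FileOutputFormat are assumed from their parent classes, and the paths
+ are illustrative.
+
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.mapred.FileInputFormat;
+ import org.apache.hadoop.mapred.FileOutputFormat;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.TextInputFormat;
+ import org.apache.hadoop.mapred.TextOutputFormat;
+
+ public class TextJobSketch {
+   public static JobConf configure() {
+     JobConf job = new JobConf(TextJobSketch.class);
+     job.setInputFormat(TextInputFormat.class);   // keys: position in file, values: line of text
+     job.setOutputFormat(TextOutputFormat.class); // plain text output
+     FileInputFormat.addInputPath(job, new Path("/in"));    // hypothetical paths
+     FileOutputFormat.setOutputPath(job, new Path("/out"));
+     return job;
+   }
+ }
+ -->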
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+ <class name="TextOutputFormat.LineRecordWriter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"/>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+</package>
+<package name="org.apache.hadoop.mapred.jobcontrol">
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.Job -->
+ <class name="Job" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf, java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+ @param jobConf a mapred job configuration representing a job to be executed.
+ @param dependingJobs a list of jobs the current job depends on]]>
+ </doc>
+ </constructor>
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+
+ @param jobConf mapred job configuration representing a job to be executed.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job name of this job]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job name for this job.
+ @param jobName the job name]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job ID of this job assigned by JobControl]]>
+ </doc>
+ </method>
+ <method name="setJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job ID for this job.
+ @param id the job ID]]>
+ </doc>
+ </method>
+ <method name="getMapredJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getAssignedJobID()} instead">
+ <doc>
+ <![CDATA[@return the mapred ID of this job
+ @deprecated use {@link #getAssignedJobID()} instead]]>
+ </doc>
+ </method>
+ <method name="setMapredJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setAssignedJobID(JobID)} instead">
+ <param name="mapredJobID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job.
+ @param mapredJobID the mapred job ID for this job.
+ @deprecated use {@link #setAssignedJobID(JobID)} instead]]>
+ </doc>
+ </method>
+ <method name="getAssignedJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred ID of this job as assigned by the
+ mapred framework.]]>
+ </doc>
+ </method>
+ <method name="setAssignedJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mapredJobID" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job as assigned by the
+ mapred framework.
+ @param mapredJobID the mapred job ID for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred job conf of this job]]>
+ </doc>
+ </method>
+ <method name="setJobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Set the mapred job conf for this job.
+ @param jobConf the mapred job conf for this job.]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the state of this job]]>
+ </doc>
+ </method>
+ <method name="setState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Set the state for this job.
+ @param state the new state for this job.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the message of this job]]>
+ </doc>
+ </method>
+ <method name="setMessage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="message" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the message for this job.
+ @param message the message for this job.]]>
+ </doc>
+ </method>
+ <method name="getDependingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the depending jobs of this job]]>
+ </doc>
+ </method>
+ <method name="addDependingJob" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dependingJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a job to this job's dependency list. Dependent jobs can only be added while a Job
+ is waiting to run, not during or afterwards.
+
+ @param dependingJob Job that this Job depends on.
+ @return <tt>true</tt> if the Job was added.]]>
+ </doc>
+ </method>
+ <method name="isCompleted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in a complete state]]>
+ </doc>
+ </method>
+ <method name="isReady" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in READY state]]>
+ </doc>
+ </method>
+ <method name="submit"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Submit this job to mapred. The state becomes RUNNING if submission
+ is successful, FAILED otherwise.]]>
+ </doc>
+ </method>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WAITING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEPENDENT_FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class encapsulates a MapReduce job and its dependency. It monitors
+ the states of the depending jobs and updates the state of this job.
+ A job starts in the WAITING state. If it does not have any depending jobs, or
+ all of its depending jobs are in the SUCCESS state, then the job state becomes
+ READY. If any depending job fails, the job will fail too.
+ When in the READY state, the job can be submitted to Hadoop for execution, with
+ the state changing to RUNNING. From the RUNNING state, the job can reach the
+ SUCCESS or FAILED state, depending on the status of the job execution.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.Job -->
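+ <!--
+ A sketch of the dependency wiring the class doc above describes: both
+ jobs start in WAITING, and the second becomes READY only once the
+ first reaches SUCCESS. The JobConf arguments are placeholders.
+
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.jobcontrol.Job;
+
+ public class DependencySketch {
+   public static Job[] build(JobConf first, JobConf second) throws IOException {
+     Job extract = new Job(first);        // starts in the WAITING state
+     Job aggregate = new Job(second);
+     aggregate.addDependingJob(extract);  // only allowed while waiting, per the doc
+     return new Job[] { extract, aggregate };
+   }
+ }
+ -->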
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.JobControl -->
+ <class name="JobControl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobControl" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a job control for a group of jobs.
+ @param groupName a name identifying this group]]>
+ </doc>
+ </constructor>
+ <method name="getWaitingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the waiting state]]>
+ </doc>
+ </method>
+ <method name="getRunningJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the running state]]>
+ </doc>
+ </method>
+ <method name="getReadyJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the ready state]]>
+ </doc>
+ </method>
+ <method name="getSuccessfulJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the success state]]>
+ </doc>
+ </method>
+ <method name="getFailedJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="addJob" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="aJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a new job.
+ @param aJob the new job]]>
+ </doc>
+ </method>
+ <method name="addJobs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobs" type="java.util.Collection&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"/>
+ <doc>
+ <![CDATA[Add a collection of jobs.
+
+ @param jobs the jobs to add]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the thread state]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set the thread state to STOPPING so that the
+ thread will stop when it wakes up.]]>
+ </doc>
+ </method>
+ <method name="suspend"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Suspend the running thread.]]>
+ </doc>
+ </method>
+ <method name="resume"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resume the suspended thread.]]>
+ </doc>
+ </method>
+ <method name="allFinished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The main loop for the thread.
+ The loop does the following:
+ Check the states of the running jobs
+ Update the states of waiting jobs
+ Submit the jobs in ready state]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a set of MapReduce jobs and their dependencies. It tracks
+ the states of the jobs by placing them into different tables according to their
+ states.
+
+ This class provides APIs for the client app to add a job to the group and to get
+ the jobs in the group in different states. When a
+ job is added, an ID unique to the group is assigned to the job.
+
+ This class has a thread that submits jobs when they become ready, monitors the
+ states of the running jobs, and updates the states of jobs based on the state
+ changes of their depending jobs. The class provides APIs for suspending/resuming
+ the thread, and for stopping the thread.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.JobControl -->
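+ <!--
+ A sketch of driving a job group with the Runnable contract and the
+ monitoring methods documented above; the polling interval is an
+ illustrative choice.
+
+ import org.apache.hadoop.mapred.jobcontrol.Job;
+ import org.apache.hadoop.mapred.jobcontrol.JobControl;
+
+ public class ControlSketch {
+   public static void runAll(Job... jobs) throws InterruptedException {
+     JobControl control = new JobControl("sketch-group");
+     for (Job j : jobs) {
+       control.addJob(j);
+     }
+     new Thread(control, "JobControl").start();
+     while (!control.allFinished()) {
+       Thread.sleep(1000);              // poll until every job settles
+     }
+     control.stop();                    // ask the thread to exit its loop
+     System.out.println("failed: " + control.getFailedJobs());
+   }
+ }
+ -->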
+</package>
+<package name="org.apache.hadoop.mapred.join">
+ <!-- start class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+ <class name="ArrayListBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="ArrayListBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayListBackedIterator" type="java.util.ArrayList&lt;X&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. The
+ implementation uses an {@link java.util.ArrayList} to store elements
+ added to it, replaying them as requested.
+ Prefer {@link StreamBackedIterator}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
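+ <!--
+ A sketch exercising the ResetableIterator contract the class above
+ implements: elements are added once, then replayed after reset().
+ Using Text as the Writable element type is an illustrative choice.
+
+ import java.io.IOException;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.join.ArrayListBackedIterator;
+
+ public class IteratorSketch {
+   public static void main(String[] args) throws IOException {
+     ArrayListBackedIterator<Text> it = new ArrayListBackedIterator<Text>();
+     it.add(new Text("a"));
+     it.add(new Text("b"));
+     Text val = new Text();
+     while (it.next(val)) {             // next(val) fills val and reports success
+       System.out.println(val);
+     }
+     it.reset();                        // rewind and replay from the start
+     while (it.next(val)) {
+       System.out.println("again: " + val);
+     }
+     it.close();
+   }
+ }
+ -->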
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <interface name="ComposableInputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Refinement of InputFormat requiring implementors to provide
+ ComposableRecordReader instead of RecordReader.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <interface name="ComposableRecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <implements name="java.lang.Comparable&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key this RecordReader would supply on a call to next(K,V)]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RecordReader into the object provided.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the stream is not empty, but provides no guarantee that
+ a call to next(K,V) will succeed.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[While key-value pairs from this RecordReader match the given key, register
+ them with the JoinCollector provided.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Additional operations required of a RecordReader to participate in a join.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputFormat -->
+ <class name="CompositeInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="CompositeInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Interpret a given string as a composite expression.
+ {@code
+ func ::= <ident>([<func>,]*<func>)
+ func ::= tbl(<class>,"<path>")
+ class ::= @see java.lang.Class#forName(java.lang.String)
+ path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
+ }
+ Reads expression from the <tt>mapred.join.expr</tt> property and
+ user-supplied join types from <tt>mapred.join.define.&lt;ident&gt;</tt>
+ types. Paths supplied to <tt>tbl</tt> are given as input paths to the
+ InputFormat class listed.
+ @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]>
+ </doc>
+ </method>
+ <method name="addDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds the default set of identifiers to the parser.]]>
+ </doc>
+ </method>
+ <method name="validateInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Verify that this composite has children and that all its children
+ can validate their input.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a CompositeInputSplit from the child InputFormats by assigning the
+ ith split from each child to the ith composite split.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a CompositeRecordReader for the children of this InputFormat
+ as defined in the init expression.
+ The outermost join need only be composable, not necessarily a composite.
+ Mandating TupleWritable isn't strictly correct.]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given InputFormat class (inf), path (p) return:
+ {@code tbl(<inf>, <p>) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), InputFormat class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), InputFormat class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat capable of performing joins over a set of data sources sorted
+ and partitioned the same way.
+ @see #setFormat
+
+ A user may define new join types by setting the property
+ <tt>mapred.join.define.&lt;ident&gt;</tt> to a classname. In the expression
+ <tt>mapred.join.expr</tt>, the identifier will be assumed to be a
+ ComposableRecordReader.
+ <tt>mapred.join.keycomparator</tt> can be a classname used to compare keys
+ in the join.
+ @see JoinRecordReader
+ @see MultiFilterRecordReader]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputFormat -->
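+ <!--
+ A sketch of the compose() helper and the mapred.join.expr property the
+ class doc above names. The "inner" join identifier is assumed to be
+ among the defaults the parser registers, and KeyValueTextInputFormat
+ and the paths are illustrative choices for sources that are sorted and
+ partitioned the same way.
+
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.KeyValueTextInputFormat;
+ import org.apache.hadoop.mapred.join.CompositeInputFormat;
+
+ public class JoinSketch {
+   public static void configure(JobConf job) {
+     String expr = CompositeInputFormat.compose(
+         "inner", KeyValueTextInputFormat.class,
+         new Path[] { new Path("/left"), new Path("/right") });
+     job.set("mapred.join.expr", expr);              // expression read by setFormat
+     job.setInputFormat(CompositeInputFormat.class);
+   }
+ }
+ -->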
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputSplit -->
+ <class name="CompositeInputSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="CompositeInputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CompositeInputSplit" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.mapred.InputSplit"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an InputSplit to this collection.
+ @throws IOException If capacity was not specified during construction
+ or if capacity has been reached.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get the ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the aggregate length of all child InputSplits currently added.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the length of the ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Collect a set of hosts from all child InputSplits.]]>
+ </doc>
+ </method>
+ <method name="getLocation" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[getLocations from the ith InputSplit.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write splits in the following format.
+ {@code
+ <count><class1><class2>...<classn><split1><split2>...<splitn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}
+ @throws IOException If the child InputSplit cannot be read, typically
+ for failing access checks.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This InputSplit contains a set of child InputSplits. Any InputSplit inserted
+ into this collection must have a public default constructor.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputSplit -->
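+ <!-- A hedged usage sketch for the class above: build a CompositeInputSplit
+      with a fixed capacity, then query the aggregate length and locations.
+      FileSplit is used only as a convenient concrete InputSplit; the paths
+      and sizes are illustrative.
+
+      import java.io.IOException;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.FileSplit;
+      import org.apache.hadoop.mapred.join.CompositeInputSplit;
+
+      public class CompositeSplitSketch {
+        public static void main(String[] args) throws IOException {
+          // Capacity must be fixed up front; add() past it throws IOException.
+          CompositeInputSplit split = new CompositeInputSplit(2);
+          split.add(new FileSplit(new Path("/data/a"), 0L, 1024L, new String[0]));
+          split.add(new FileSplit(new Path("/data/b"), 0L, 2048L, new String[0]));
+          long total = split.getLength();        // 3072, summed over children
+          String[] hosts = split.getLocations(); // hosts from all children
+        }
+      }
+ -->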
+ <!-- start class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <class name="CompositeRecordReader" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="CompositeRecordReader" type="int, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a RecordReader with <tt>capacity</tt> children to position
+ <tt>id</tt> in the parent reader.
+ The id of a root CompositeRecordReader is -1 by convention, but relying
+ on this is not recommended.]]>
+ </doc>
+ </constructor>
+ <method name="combine" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ </method>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordReaderQueue" return="java.util.PriorityQueue&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return sorted list of RecordReaders for this composite.]]>
+ </doc>
+ </method>
+ <method name="getComparator" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return comparator defining the ordering for RecordReaders in this
+ composite.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rr" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ? extends V&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a RecordReader to this collection.
+ The id() of a RecordReader determines where in the Tuple its
+ entry will appear. Adding RecordReaders with the same id has
+ undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key for the current join or the value at the top of the
+ RecordReader heap.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the top of this RR into the given object.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if it is possible that this could emit more values.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Pass skip key to child RRs.]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Obtain an iterator over the child RRs appropriate to the value type
+ ultimately emitted from this join.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[If key provided matches that of this Composite, give JoinCollector
+ iterator over values it may emit.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For all child RRs offering the key provided, obtain an iterator
+ at that position in the JoinCollector.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key of join or head of heap
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new key value common to all child RRs.
+ @throws ClassCastException if key classes differ.]]>
+ </doc>
+ </method>
+ <method name="createInternalValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a value to be used internally for joins.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unsupported (returns zero in all cases).]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all child RRs.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report progress as the minimum of all child RR progress.]]>
+ </doc>
+ </method>
+ <field name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, X&gt;.JoinCollector"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="kids" type="org.apache.hadoop.mapred.join.ComposableRecordReader[]"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A RecordReader that can effect joins of RecordReaders sharing a common key
+ type and partitioning.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <class name="InnerJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Return true iff the tuple is full (all data sources contain this key).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full inner join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
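+ <!-- A hedged sketch of the documented combine() semantics for the inner
+      join above (not the shipped source): a tuple is emitted only when
+      every child source contributed a value for the key.
+
+      protected boolean combine(Object[] srcs, TupleWritable dst) {
+        for (int i = 0; i < dst.size(); ++i) {
+          if (!dst.has(i)) {
+            return false; // some source lacks this key; drop the tuple
+          }
+        }
+        return true; // tuple is full: all sources contain this key
+      }
+ -->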
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <class name="JoinRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, org.apache.hadoop.io.Writable, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Emit the next set of key, value pairs as defined by the child
+ RecordReaders and operation associated with this composite RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator wrapping the JoinCollector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite joins returning Tuples of arbitrary Writables.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <class name="JoinRecordReader.JoinDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader.JoinDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Since the JoinCollector is effecting our operation, we need only
+ provide an iterator proxy wrapping its operation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+ <class name="MultiFilterRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"/>
+ <constructor name="MultiFilterRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For each tuple emitted, return a value (typically one of the values
+ in the tuple).
+ Modifying the Writables in the tuple is permitted and unlikely to affect
+ join behavior in most cases, but it is not recommended. It's safer to
+ clone first.]]>
+ </doc>
+ </method>
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Default implementation offers {@link #emit} every Tuple from the
+ collector (the outer join of child RRs).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator returning a single value from the tuple.
+ @see MultiFilterDelegationIterator]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite joins returning values derived from multiple
+ sources, but generally not tuples.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
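+ <!-- A hedged sketch of a MultiFilterRecordReader subclass: emit() selects
+      a single value from each joined tuple (here, the first occupied slot).
+      This follows the contract documented above, not the shipped source.
+
+      protected V emit(TupleWritable dst) throws IOException {
+        for (int i = 0; i < dst.size(); ++i) {
+          if (dst.has(i)) {
+            @SuppressWarnings("unchecked")
+            V val = (V) dst.get(i);
+            return val; // per the docs above, cloning first is safer
+          }
+        }
+        return null; // no occupied slot; combine() should have filtered this
+      }
+ -->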
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <class name="MultiFilterRecordReader.MultiFilterDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"/>
+ <constructor name="MultiFilterRecordReader.MultiFilterDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy the JoinCollector, but include callback to emit.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <class name="OuterJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit everything from the collector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full outer join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.OverrideRecordReader -->
+ <class name="OverrideRecordReader" extends="org.apache.hadoop.mapred.join.MultiFilterRecordReader&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit the value with the highest position in the tuple.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instead of filling the JoinCollector with iterators from all
+ data sources, fill only the rightmost for this key.
+ This not only saves space by discarding the other sources, but
+ it also emits the number of key-value pairs in the preferred
+ RecordReader instead of repeating that stream n times, where
+ n is the cardinality of the cross product of the discarded
+ streams for the given key.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Prefer the &quot;rightmost&quot; data source for this key.
+ For example, <tt>override(S1,S2,S3)</tt> will prefer values
+ from S3 over S2, and values from S2 over S1 for all keys
+ emitted from all sources.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OverrideRecordReader -->
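+ <!-- A hedged job-configuration sketch for the override semantics above.
+      It assumes the companion CompositeInputFormat class from this package
+      and its "mapred.join.expr" property, neither of which appears in this
+      excerpt.
+
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.SequenceFileInputFormat;
+      import org.apache.hadoop.mapred.join.CompositeInputFormat;
+
+      public class OverrideJoinSketch {
+        public static void configure(JobConf job) {
+          job.setInputFormat(CompositeInputFormat.class);
+          job.set("mapred.join.expr", CompositeInputFormat.compose(
+              "override", SequenceFileInputFormat.class,
+              new Path("/S1"), new Path("/S2"), new Path("/S3")));
+          // For a key present in several sources, values from /S3 are
+          // preferred over /S2, and /S2 over /S1, as documented above.
+        }
+      }
+ -->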
+ <!-- start class org.apache.hadoop.mapred.join.Parser -->
+ <class name="Parser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Very simple shift-reduce parser for join expressions.
+
+ This should be sufficient for the user extension permitted now, but ought to
+ be replaced with a parser generator if more complex grammars are supported.
+ In particular, this &quot;shift-reduce&quot; parser has no states. Each set
+ of formals requires a different internal node type, which is responsible for
+ interpreting the list of tokens it receives. This is sufficient for the
+ current grammar, but it has several annoying properties that might inhibit
+ extension. In particular, parentheses are always function calls; an
+ algebraic or filter grammar would not only require a node type, but must
+ also work around the internals of this parser.
+
+ For most other cases, adding classes to the hierarchy, particularly by
+ extending JoinRecordReader and MultiFilterRecordReader, is fairly
+ straightforward. One need only override the relevant method(s) (usually only
+ {@link CompositeRecordReader#combine}) and include a property to map its
+ value to an identifier in the parser.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser -->
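+ <!-- A hedged extension sketch following the javadoc above: subclass
+      JoinRecordReader, override combine(), and map an identifier to the
+      new reader. The "mapred.join.define.<ident>" property is an assumed
+      registration convention; it does not appear in this excerpt.
+
+      public class AntiJoinReader<K extends WritableComparable>
+          extends JoinRecordReader<K> {
+        public AntiJoinReader(int id, JobConf conf, int capacity,
+            Class<? extends WritableComparator> cmpcl) throws IOException {
+          super(id, conf, capacity, cmpcl);
+        }
+        protected boolean combine(Object[] srcs, TupleWritable dst) {
+          // Keep keys held by the first source and by no other source.
+          if (!dst.has(0)) {
+            return false;
+          }
+          for (int i = 1; i < dst.size(); ++i) {
+            if (dst.has(i)) {
+              return false;
+            }
+          }
+          return true;
+        }
+      }
+      // Assumed registration:
+      // job.setClass("mapred.join.define.anti", AntiJoinReader.class,
+      //              ComposableRecordReader.class);
+ -->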
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Node -->
+ <class name="Parser.Node" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat"/>
+ <constructor name="Parser.Node" type="java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addIdentifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="ident" type="java.lang.String"/>
+ <param name="mcstrSig" type="java.lang.Class[]"/>
+ <param name="nodetype" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.Parser.Node&gt;"/>
+ <param name="cl" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;"/>
+ <exception name="NoSuchMethodException" type="java.lang.NoSuchMethodException"/>
+ <doc>
+ <![CDATA[For a given identifier, add a mapping to the nodetype for the parse
+ tree and to the ComposableRecordReader to be created, including the
+ formals required to invoke the constructor.
+ The nodetype and constructor signature should be filled in from the
+ child node.]]>
+ </doc>
+ </method>
+ <method name="setID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="int"/>
+ </method>
+ <method name="setKeyComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"/>
+ </method>
+ <field name="rrCstrMap" type="java.util.Map&lt;java.lang.String, java.lang.reflect.Constructor&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;&gt;"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ident" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Node -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <class name="Parser.NodeToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <class name="Parser.NumToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.NumToken" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <class name="Parser.StrToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.StrToken" type="org.apache.hadoop.mapred.join.Parser.TType, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Token -->
+ <class name="Parser.Token" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getType" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Tagged-union type for tokens from the join expression.
+ @see Parser.TType]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Token -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.TType -->
+ <class name="Parser.TType" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.join.Parser.TType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.join.Parser.TType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.TType -->
+ <!-- start interface org.apache.hadoop.mapred.join.ResetableIterator -->
+ <interface name="ResetableIterator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True iff a call to next will succeed.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign next value to actual.
+ It is required that elements added to a ResetableIterator be returned in
+ the same order after a call to {@link #reset} (FIFO).
+
+ Note that a call to this may fail for nested joins (i.e. more elements
+ available, but none satisfying the constraints of the join).]]>
+ </doc>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign last value returned to actual.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set iterator to return to the start of its range. Must be called after
+ calling {@link #add} to avoid a ConcurrentModificationException.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an element to the collection of elements to iterate over.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close datasources and release resources. Calling methods on the iterator
+ after calling close has undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Close datasources, but do not release internal resources. Calling this
+ method should permit the object to be reused with a different datasource.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This defines an interface to a stateful Iterator that can replay elements
+ added to it directly.
+ Note that this does not extend {@link java.util.Iterator}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ResetableIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <class name="ResetableIterator.EMPTY" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;U&gt;"/>
+ <constructor name="ResetableIterator.EMPTY"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <!-- start class org.apache.hadoop.mapred.join.StreamBackedIterator -->
+ <class name="StreamBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="StreamBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. This
+ implementation uses a byte array to store elements added to it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.StreamBackedIterator -->
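+ <!-- A hedged usage sketch of the ResetableIterator contract with the
+      byte-array-backed implementation above: add() the elements, reset(),
+      then next() replays them in FIFO order.
+
+      import java.io.IOException;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.mapred.join.StreamBackedIterator;
+
+      public class IteratorSketch {
+        public static void main(String[] args) throws IOException {
+          StreamBackedIterator<Text> it = new StreamBackedIterator<Text>();
+          it.add(new Text("a"));
+          it.add(new Text("b"));
+          it.reset(); // required after add(), per the interface docs above
+          Text val = new Text();
+          while (it.hasNext() && it.next(val)) {
+            System.out.println(val); // prints a, then b (FIFO)
+          }
+          it.close();
+        }
+      }
+ -->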
+ <!-- start class org.apache.hadoop.mapred.join.TupleWritable -->
+ <class name="TupleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="TupleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty tuple with no allocated storage for writables.]]>
+ </doc>
+ </constructor>
+ <constructor name="TupleWritable" type="org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Initialize the tuple with the given storage; it is unknown whether
+ any of the writables contain &quot;written&quot; values.]]>
+ </doc>
+ </constructor>
+ <method name="has" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Return true if tuple has an element at the position provided.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get ith Writable from Tuple.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of children in this Tuple.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator over the elements in this tuple.
+ Note that this doesn't flatten the tuple; one may receive tuples
+ from this iterator.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert Tuple to String as in the following.
+ <tt>[<child1>,<child2>,...,<childn>]</tt>]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes each Writable to <code>out</code>.
+ TupleWritable format:
+ {@code
+ <count><type1><type2>...<typen><obj1><obj2>...<objn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.TupleWritable -->
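+ <!-- A hedged sketch of consuming a TupleWritable emitted by a join: test
+      has(i) before get(i), since a slot may be empty under an outer join.
+
+      import org.apache.hadoop.io.Writable;
+      import org.apache.hadoop.mapred.join.TupleWritable;
+
+      public class TupleSketch {
+        public static String render(TupleWritable tuple) {
+          StringBuilder sb = new StringBuilder();
+          for (int i = 0; i < tuple.size(); ++i) {
+            if (tuple.has(i)) {            // slot i was written by source i
+              Writable w = tuple.get(i);   // may itself be a nested tuple
+              sb.append(i).append('=').append(w).append(' ');
+            }
+          }
+          return sb.toString().trim();
+        }
+      }
+ -->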
+ <!-- start class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+ <class name="WrappedRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, U&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key at the head of this RR.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RR into the object supplied.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if the RR, including the k,v pair stored in this object,
+ is not exhausted.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next k,v pair into the head of this object; return true iff
+ the RR and this are not exhausted.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an iterator to the collector at the position occupied by this
+ RecordReader over the values in this stream paired with the key
+ provided (i.e. register a stream of values from this source matching K
+ with a collector).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write key-value pair at the head of this stream to the objects provided;
+ get next key-value pair from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new key from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="U extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new value from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request progress from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request position from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Forward close request to proxied RR.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key at head of proxied RR
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true iff compareTo(other) returns 0.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy class for a RecordReader participating in the join framework.
+ This class keeps track of the &quot;head&quot; key-value pair for the
+ provided RecordReader and keeps a store of values matching a key when
+ this source is participating in a join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+</package>
+<package name="org.apache.hadoop.mapred.lib">
+ <!-- start class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
+ <class name="FieldSelectionMapReduce" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="FieldSelectionMapReduce"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The identity function. Input key/value pair is written directly to output.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements a mapper/reducer class that can be used to perform
+ field selections in a manner similar to unix cut. The input data is treated
+ as fields separated by a user specified separator (the default value is
+ "\t"). The user can specify a list of fields that form the map output keys,
+ and a list of fields that form the map output values. If the inputformat is
+ TextInputFormat, the mapper will ignore the key to the map function, and the
+ fields are taken from the value only. Otherwise, the fields are the union of those
+ from the key and those from the value.
+
+ The field separator is under the attribute "mapred.data.field.separator".
+
+ The map output field list spec is under attribute "map.output.key.value.fields.spec".
+ The value is expected to be of the form "keyFieldsSpec:valueFieldsSpec".
+ Each of keyFieldsSpec and valueFieldsSpec is a comma (,) separated list of field specs: fieldSpec,fieldSpec,fieldSpec ...
+ Each field spec can be a simple number (e.g. 5) specifying a specific field, or a range
+ (like 2-5) to specify a range of fields, or an open range (like 3-) specifying all
+ the fields starting from field 3. The open range field spec applies to the value
+ fields only; it has no effect on the key fields.
+
+ Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies fields 4,3,0 and 1 for the keys,
+ and fields 6,5,1,2,3, and 7 and above for the values.
+
+ The reduce output field list spec is under attribute "reduce.output.key.value.fields.spec".
+
+ The reducer extracts output key/value pairs in a similar manner, except that
+ the key is never ignored.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
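+ <!-- A hedged configuration sketch using the property names and the example
+      spec from the javadoc above; the reduce spec value is illustrative.
+
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.lib.FieldSelectionMapReduce;
+
+      public class FieldSelectionSketch {
+        public static void configure(JobConf job) {
+          job.setMapperClass(FieldSelectionMapReduce.class);
+          job.setReducerClass(FieldSelectionMapReduce.class);
+          job.set("mapred.data.field.separator", "\t");
+          // Keys from fields 4,3,0,1; values from 6,5,1,2,3 and 7 onward.
+          job.set("map.output.key.value.fields.spec", "4,3,0,1:6,5,1-3,7-");
+          job.set("reduce.output.key.value.fields.spec", "0-2:3-");
+        }
+      }
+ -->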
+ <!-- start class org.apache.hadoop.mapred.lib.HashPartitioner -->
+ <class name="HashPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="HashPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partition keys by their {@link Object#hashCode()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.HashPartitioner -->
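+ <!-- A hedged sketch of the standard hash-partitioning computation the
+      javadoc above describes: mask the sign bit so the result of the
+      modulus is never negative. Not necessarily the shipped source.
+
+      public int getPartition(K2 key, V2 value, int numReduceTasks) {
+        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
+      }
+ -->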
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <class name="IdentityMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The identity function. Input key/value pair is written directly to
+ output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implements the identity function, mapping inputs directly to outputs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <class name="IdentityReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;V&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes all keys and values directly to output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Performs no reduction, writing all input values directly to the output.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <class name="InverseMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, V, K&gt;"/>
+ <constructor name="InverseMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;V, K&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The inverse function. Input keys and values are swapped.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that swaps keys and values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+ <class name="KeyFieldBasedPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="KeyFieldBasedPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+ <!-- start class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <class name="LongSumReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, org.apache.hadoop.io.LongWritable, K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="LongSumReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Reducer} that sums long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+ <class name="MultipleOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a composite record writer that can write key/value data to different
+ output files.
+
+ @param fs
+ the file system to use
+ @param job
+ the job conf for the job
+ @param name
+ the leaf file name for the output file (such as "part-00000")
+ @param arg3
+ a progressable for reporting progress.
+ @return a composite record writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="generateLeafFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the leaf name for the output file name. The default behavior does
+ not change the leaf file name (such as part-00000).
+
+ @param name
+ the leaf file name for the output file
+ @return the given leaf file name]]>
+ </doc>
+ </method>
+ <method name="generateFileNameForKeyValue" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the output file name based on the given key and the leaf file
+ name. The default behavior is that the file name does not depend on the
+ key.
+
+ @param key
+ the key of the output data
+ @param name
+ the leaf file name
+ @return generated file name]]>
+ </doc>
+ </method>
+ <method name="generateActualKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <doc>
+ <![CDATA[Generate the actual key from the given key/value. The default behavior is that
+ the actual key is equal to the given key.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual key derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="generateActualValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <doc>
+ <![CDATA[Generate the actual value from the given key and value. The default behavior is that
+ the actual value is equal to the given value.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual value derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="getInputFileBasedOutputFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the output file name based on a given name and the input file name. If
+ the map input file does not exist (i.e. this is not a map-only job),
+ the given name is returned unchanged. If the config value for
+ "num.of.trailing.legs.to.use" is not set, or is set to 0 or a negative value,
+ the given name is returned unchanged. Otherwise, return a file name consisting
+ of the N trailing legs of the input file name, where N is the config value for
+ "num.of.trailing.legs.to.use".
+
+ @param job
+ the job config
+ @param name
+ the output file name
+ @return the output file name based on the given name and the input file name.]]>
+ </doc>
+ </method>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param fs
+ the file system to use
+ @param job
+ a job conf object
+ @param name
+ the name of the file over which a record writer object will be
+ constructed
+ @param arg3
+ a progressable object
+ @return A RecordWriter object over the given file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This abstract class extends FileOutputFormat, allowing the output
+ data to be written to different output files. There are three basic use cases
+ for this class.
+
+ Case one: This class is used for a map reduce job with at least one reducer.
+ The reducer wants to write data to different files depending on the actual
+ keys. It is assumed that each key (or value) encodes both the actual
+ key (or value) and the desired destination for it.
+
+ Case two: This class is used for a map only job. The job wants to use an
+ output file name that is either a part of the input file name of the input
+ data, or some derivation of it.
+
+ Case three: This class is used for a map only job. The job wants to use an
+ output file name that depends on both the keys and the input file name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
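+ <!-- Editor's note: a minimal sketch of use case one above: a subclass that
+      routes each record to an output file named after its key. The subclass
+      name is hypothetical; generateFileNameForKeyValue is the protected hook
+      documented above, and MultipleTextOutputFormat (declared below) is used
+      as the concrete parent.
+
+        public class KeyBasedOutput<K, V> extends MultipleTextOutputFormat<K, V> {
+          @Override
+          protected String generateFileNameForKeyValue(K key, V value, String name) {
+            // prefix the default leaf name (e.g. part-00000) with the key
+            return key.toString() + "/" + name;
+          }
+        }
+ -->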
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <class name="MultipleSequenceFileOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleSequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends MultipleOutputFormat, allowing the output data to
+ be written to different output files in SequenceFile output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <class name="MultipleTextOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleTextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends MultipleOutputFormat, allowing the output data to
+ be written to different output files in Text output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
+ <class name="MultithreadedMapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MultithreadedMapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Multithreaded implementation of {@link org.apache.hadoop.mapred.MapRunnable}.
+ <p>
+ It can be used instead of the default implementation,
+ {@link org.apache.hadoop.mapred.MapRunner}, to improve throughput when the Map
+ operation is not CPU bound.
+ <p>
+ Map implementations using this MapRunnable must be thread-safe.
+ <p>
+ The Map-Reduce job has to be configured to use this MapRunnable class (using
+ the JobConf.setMapRunnerClass method), and the number of threads the
+ thread-pool can use is set with the
+ <code>mapred.map.multithreadedrunner.threads</code> property; its default
+ value is 10 threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
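+ <!-- Editor's note: a minimal job-configuration sketch for the class doc
+      above. JobConf.setMapRunnerClass and the
+      mapred.map.multithreadedrunner.threads property are the ones named in the
+      documentation; the driver class "MyJob" and the thread count of 20 are
+      illustrative.
+
+        JobConf conf = new JobConf(MyJob.class);   // MyJob is hypothetical
+        conf.setMapRunnerClass(MultithreadedMapRunner.class);
+        conf.setInt("mapred.map.multithreadedrunner.threads", 20); // default is 10
+ -->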
+ <!-- start class org.apache.hadoop.mapred.lib.NLineInputFormat -->
+ <class name="NLineInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="NLineInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically splits the set of input files for the job, treating N lines
+ of the input as one split.
+
+ @see org.apache.hadoop.mapred.FileInputFormat#getSplits(JobConf, int)]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[NLineInputFormat splits N lines of input into one split.
+
+ In many "pleasantly" parallel applications, each process/mapper
+ processes the same input file(s), but the computations are
+ controlled by different parameters (referred to as "parameter sweeps").
+ One way to achieve this is to specify a set of parameters
+ (one set per line) as input in a control file
+ (which is the input path to the map-reduce application,
+ whereas the input dataset is specified
+ via a config variable in JobConf).
+
+ NLineInputFormat can be used in such applications: it splits
+ the input file such that, by default, one line is fed as
+ a value to one map task, and the key is the offset;
+ i.e. (k,v) is (LongWritable, Text).
+ The location hints will span the whole mapred cluster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NLineInputFormat -->
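+ <!-- Editor's note: a minimal parameter-sweep configuration sketch for the
+      class doc above. The property name
+      "mapred.line.input.format.linespermap" is an assumption (it is not named
+      in the documentation above); the driver class and control-file path are
+      illustrative.
+
+        JobConf conf = new JobConf(MyJob.class);   // MyJob is hypothetical
+        conf.setInputFormat(NLineInputFormat.class);
+        // one parameter set (i.e. one line) per map task; assumed property name
+        conf.setInt("mapred.line.input.format.linespermap", 1);
+        FileInputFormat.setInputPaths(conf, new Path("params.txt"));
+ -->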
+ <!-- start class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <class name="NullOutputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="NullOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[Consume all outputs and put them in /dev/null.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <class name="RegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="RegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.RegexMapper -->
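+ <!-- Editor's note: a minimal configuration sketch for the class doc above.
+      The property names "mapred.mapper.regex" and "mapred.mapper.regex.group"
+      are assumptions (they are not named in the documentation above); the
+      pattern is illustrative.
+
+        JobConf conf = new JobConf(MyJob.class);   // MyJob is hypothetical
+        conf.setMapperClass(RegexMapper.class);
+        conf.set("mapred.mapper.regex", "ERROR\\s+(\\w+)"); // pattern to match
+        conf.setInt("mapred.mapper.regex.group", 1);        // capture group to emit
+ -->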
+ <!-- start class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+ <class name="TokenCountMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="TokenCountMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that maps text values into <token,freq> pairs. Uses
+ {@link StringTokenizer} to break text into tokens.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+</package>
+<package name="org.apache.hadoop.mapred.lib.aggregate">
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+ <class name="DoubleValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="DoubleValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a double value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="double"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a double value.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getSum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up a sequence of double
+ values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
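+ <!-- Editor's note: a standalone sketch of the aggregator protocol documented
+      above (addNextValue / getSum / getReport / reset). The values are
+      illustrative; the other built-in aggregators follow the same protocol.
+
+        DoubleValueSum sum = new DoubleValueSum();
+        sum.addNextValue(1.5);
+        sum.addNextValue("2.5");         // string form is parsed as a double
+        double total = sum.getSum();     // 4.0
+        String report = sum.getReport(); // string form of the aggregated value
+        sum.reset();                     // back to the initial state
+ -->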
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <class name="LongValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the maximum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <class name="LongValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the minimum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <class name="LongValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getSum" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <class name="StringValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the largest of
+ a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <class name="StringValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the smallest of
+ a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+ <class name="UniqValueCount" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="UniqValueCount"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UniqValueCount" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.
+ @param maxNum the limit on the number of unique values to keep.]]>
+ </doc>
+ </constructor>
+ <method name="setMaxItems" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <doc>
+ <![CDATA[Set the limit on the number of unique values
+ @param n the desired limit on the number of unique values
+ @return the new limit on the number of unique values]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the number of unique objects aggregated]]>
+ </doc>
+ </method>
+ <method name="getUniqueItems" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the set of the unique objects]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of the unique objects. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that dedupes a sequence of objects.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+ <class name="UserDefinedValueAggregatorDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="UserDefinedValueAggregatorDescriptor" type="java.lang.String, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param className the class name of the user defined descriptor class
+ @param job a configuration object used for descriptor configuration]]>
+ </doc>
+ </constructor>
+ <method name="createInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="className" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create an instance of the given class.
+ @param className the name of the class
+ @return a dynamically created instance of the given class]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pair
+ by delegating the invocation to the real object.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a wrapper for a user defined value aggregator descriptor.
+ It serves two functions: one is to create an object of ValueAggregatorDescriptor from the
+ name of a user-defined class that may be dynamically loaded; the other is to
+ delegate invocations of the generateKeyValPairs function to the created object.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+ <interface name="ValueAggregator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val the value to be added]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of values as the outputs of the combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface defines the minimal protocol for value aggregators.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
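+ <!-- Editor's note: a minimal sketch of a custom aggregator implementing the
+      four-method protocol documented above. The class (a simple running count)
+      is purely illustrative and assumes java.util.ArrayList is imported.
+
+        public class ValueCount implements ValueAggregator {
+          private long count = 0;
+          public void addNextValue(Object val) { count++; }
+          public void reset() { count = 0; }
+          public String getReport() { return String.valueOf(count); }
+          public ArrayList getCombinerOutput() {
+            ArrayList ret = new ArrayList(1);
+            ret.add(String.valueOf(count)); // one element: the partial count
+            return ret;
+          }
+        }
+ -->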
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+ <class name="ValueAggregatorBaseDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="ValueAggregatorBaseDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="generateEntry" return="java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <param name="id" type="java.lang.String"/>
+ <param name="val" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @param id the aggregation id
+ @param val the value associated with the id to be aggregated
+ @return an Entry whose key is the aggregation id prefixed with
+ the aggregation type.]]>
+ </doc>
+ </method>
+ <method name="generateValueAggregator" return="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @return a value aggregator of the given type.]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate 1 or 2 aggregation-id/value pairs for the given key/value pair.
+ The first id will be of type LONG_VALUE_SUM, with "record_count" as
+ its aggregation id. If the input is a file split,
+ the second id of the same type will be generated too, with the file name
+ as its aggregation id. This achieves the behavior of counting the total number
+ of records in the input data, and the number of records in each input file.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[get the input file name.
+
+ @param job a job configuration object]]>
+ </doc>
+ </method>
+ <field name="UNIQ_VALUE_COUNT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VALUE_HISTOGRAM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputFile" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements the common functionalities of
+ the subclasses of ValueAggregatorDescriptor class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
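+ <!-- Editor's note: a minimal word-count style descriptor sketch extending the
+      base class above, using the documented generateEntry helper, the
+      LONG_VALUE_SUM type, and the ONE constant from ValueAggregatorDescriptor.
+      The class name and whitespace tokenization are illustrative; Entry is
+      java.util.Map.Entry and Text is org.apache.hadoop.io.Text.
+
+        public class WordCountDescriptor extends ValueAggregatorBaseDescriptor {
+          public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
+            ArrayList<Entry<Text, Text>> pairs = new ArrayList<Entry<Text, Text>>();
+            for (String word : val.toString().split("\\s+")) {
+              // aggregation id = the word, aggregation type = long sum
+              pairs.add(generateEntry(LONG_VALUE_SUM, word, ONE));
+            }
+            return pairs;
+          }
+        }
+ -->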
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <class name="ValueAggregatorCombiner" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorCombiner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[The combiner does not need any configuration.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Combines values for a given key.
+ @param key the key, expected to be a Text object whose prefix indicates
+ the type of aggregation to apply to the values.
+ @param values the values to combine
+ @param output to collect combined values]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic combiner of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+ <interface name="ValueAggregatorDescriptor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pair.
+ This function is usually called by the mapper of an Aggregate based job.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configure the object
+
+ @param job
+ a JobConf object that may contain the information that can be used
+ to configure the object.]]>
+ </doc>
+ </method>
+ <field name="TYPE_SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ONE" type="org.apache.hadoop.io.Text"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This interface defines the contract a value aggregator descriptor must
+ support. Such a descriptor can be configured with a JobConf object. Its main
+ function is to generate a list of aggregation-id/value pairs. An aggregation
+ id encodes an aggregation type which is used to guide the way to aggregate
+ the value in the reduce/combiner phase of an Aggregate based job. The mapper in
+ an Aggregate based map/reduce job may create one or more of
+ ValueAggregatorDescriptor objects at configuration time. For each input
+ key/value pair, the mapper will use those objects to create aggregation
+ id/value pairs.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+ <class name="ValueAggregatorJob" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorJob"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation. Generic hadoop
+ arguments are accepted.
+ @return a JobConf object ready for submission.
+
+ @throws IOException
+ @see GenericOptionsParser]]>
+ </doc>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setAggregatorDescriptors"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and run an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the main class for creating a map/reduce job using the Aggregate
+ framework. Aggregate is a specialization of the map/reduce framework for
+ performing various simple aggregations.
+
+ Generally speaking, in order to implement an application using the Map/Reduce
+ model, the developer implements Map and Reduce functions (and possibly a
+ Combine function). However, many applications related to counting and
+ statistics computation have very similar characteristics. Aggregate abstracts
+ out the general patterns of these functions and implements those patterns.
+ In particular, the package provides generic mapper/reducer/combiner classes,
+ a set of built-in value aggregators, and a generic utility class that helps
+ the user create map/reduce jobs using the generic classes. The built-in
+ aggregators include:
+
+ sum over numeric values; count of distinct values; histogram of values;
+ minimum, maximum, median, average and standard deviation of numeric values
+
+ The developer using Aggregate need only provide a plugin class conforming
+ to the following interface:
+
+ public interface ValueAggregatorDescriptor { public ArrayList<Entry>
+ generateKeyValPairs(Object key, Object value); public void
+ configure(JobConf job); }
+
+ The package also provides a base class, ValueAggregatorBaseDescriptor,
+ implementing the above interface. The user can extend the base class and
+ implement generateKeyValPairs accordingly.
+
+ The primary work of generateKeyValPairs is to emit one or more key/value
+ pairs based on the input key/value pair. The key in an output key/value pair
+ encodes two pieces of information: the aggregation type and the aggregation
+ id. The value will be aggregated onto the aggregation id according to the
+ aggregation type.
+
+ This class offers a function to generate a map/reduce job using the Aggregate
+ framework. The function takes the following parameters: the input directory
+ spec, the input format (text or sequence file), the output directory, and a
+ file specifying the user plugin class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
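+ <!-- Editor's example (hedged, not part of the generated API description):
+      a minimal ValueAggregatorDescriptor plugin of the kind described in the
+      class documentation above. It extends ValueAggregatorBaseDescriptor and
+      assumes that class's generateEntry() helper and LONG_VALUE_SUM constant;
+      the class name and the whitespace tokenization are illustrative only.
+
+ import java.util.ArrayList;
+ import java.util.Map.Entry;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor;
+
+ public class WordCountDescriptor extends ValueAggregatorBaseDescriptor {
+   private static final Text ONE = new Text("1");
+
+   public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
+     ArrayList<Entry<Text, Text>> out = new ArrayList<Entry<Text, Text>>();
+     for (String word : val.toString().split("\\s+")) {
+       // LONG_VALUE_SUM selects the built-in sum aggregator; the word is the
+       // aggregation id, so the reduce phase sums the 1s per distinct word.
+       out.add(generateEntry(LONG_VALUE_SUM, word, ONE));
+     }
+     return out;
+   }
+ }
+ -->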
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <class name="ValueAggregatorJobBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K1, V1, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="ValueAggregatorJobBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="logSpec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="aggregatorDescriptorList" type="java.util.ArrayList&lt;org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This abstract class implements some common functionality of the
+ generic mapper, reducer and combiner classes of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <class name="ValueAggregatorMapper" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The map function. It iterates through the value aggregator descriptor
+ list to generate aggregation id/value pairs and emits them.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="org.apache.hadoop.io.Text"/>
+ <param name="arg1" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic mapper of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <class name="ValueAggregatorReducer" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param key
+ the key is expected to be a Text object, whose prefix indicates
+ the type of aggregation to aggregate the values. In effect, data-driven
+ computing is achieved. It is assumed that each aggregator's
+ getReport method emits appropriate output for the aggregator. This
+ may be further customized.
+ @param values the values to be aggregated]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic reducer of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+ <class name="ValueHistogram" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="ValueHistogram"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Add the given val to the aggregator.
+
+ @param val the value to be added. It is expected to be a string
+ in the form of xxxx\tnum, meaning xxxx has num occurrences.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this aggregator.
+ It includes the following basic statistics of the histogram:
+ the number of unique values
+ the minimum value
+ the median value
+ the maximum value
+ the average value
+ the standard deviation]]>
+ </doc>
+ </method>
+ <method name="getReportDetails" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a string representation of the list of value/frequency pairs of
+ the histogram]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a list of value/frequency pairs.
+ The return value is expected to be used by the reducer.]]>
+ </doc>
+ </method>
+ <method name="getReportItems" return="java.util.TreeMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a TreeMap representation of the histogram]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the aggregator.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that computes the
+ histogram of a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
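+ <!-- Editor's example (hedged): exercising ValueHistogram directly to show
+      the "value\tcount" input convention from addNextValue() above. The
+      values and counts are made up.
+
+ import org.apache.hadoop.mapred.lib.aggregate.ValueHistogram;
+
+ public class HistogramDemo {
+   public static void main(String[] args) {
+     ValueHistogram hist = new ValueHistogram();
+     hist.addNextValue("apple\t3");   // "apple" occurred 3 times
+     hist.addNextValue("banana\t1");
+     hist.addNextValue("apple\t2");
+     System.out.println(hist.getReport());        // unique count, min, median, max, avg, std dev
+     System.out.println(hist.getReportDetails()); // per-value frequencies
+     hist.reset();                                // back to an empty histogram
+   }
+ }
+ -->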
+</package>
+<package name="org.apache.hadoop.mapred.pipes">
+ <!-- start class org.apache.hadoop.mapred.pipes.Submitter -->
+ <class name="Submitter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Submitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExecutable" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the URI of the application's executable.
+ @param conf
+ @return the URI where the application's executable is located]]>
+ </doc>
+ </method>
+ <method name="setExecutable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="executable" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the URI for the application's executable. Normally this is an
+ hdfs: location.
+ @param conf
+ @param executable The URI of the application's executable.]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job is using a Java RecordReader.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordReader" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java RecordReader
+ @param conf the configuration to check
+ @return is it a Java RecordReader?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Mapper is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaMapper" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Mapper.
+ @param conf the configuration to check
+ @return is it a Java Mapper?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaReducer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Reducer is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaReducer" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Reducer.
+ @param conf the configuration to check
+ @return is it a Java Reducer?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job will use a Java RecordWriter.
+ @param conf the configuration to modify
+ @param value the new value to set]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordWriter" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Will the reduce use a Java RecordWriter?
+ @param conf the configuration to check
+ @return true, if the output of the job will be written by Java]]>
+ </doc>
+ </method>
+ <method name="getKeepCommandFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Does the user want to keep the command file for debugging? If this is
+ true, pipes will write a copy of the command data to a file in the
+ task directory named "downlink.data", which may be used to run the C++
+ program under the debugger. You probably also want to set
+ JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
+ being deleted.
+ To run using the data file, set the environment variable
+ "hadoop.pipes.command.file" to point to the file.
+ @param conf the configuration to check
+ @return will the framework save the command file?]]>
+ </doc>
+ </method>
+ <method name="setKeepCommandFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether to keep the command file for debugging
+ @param conf the configuration to modify
+ @param keep the new value]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Submit a pipes job based on the command line arguments.
+ @param args]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The main entry point and job submitter. It may be used either from the
+ command line or through the API to launch Pipes jobs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.pipes.Submitter -->
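+ <!-- Editor's example (hedged): API-based Pipes submission using the setters
+      documented above. The executable URI is hypothetical, and a real job
+      would also configure input and output paths on the JobConf.
+
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.RunningJob;
+ import org.apache.hadoop.mapred.pipes.Submitter;
+
+ public class PipesLauncher {
+   public static void main(String[] args) throws Exception {
+     JobConf conf = new JobConf();
+     Submitter.setExecutable(conf, "hdfs://namenode/apps/my-pipes-app"); // hypothetical URI
+     Submitter.setIsJavaRecordReader(conf, true);  // read input with a Java RecordReader
+     Submitter.setIsJavaRecordWriter(conf, true);  // write output with a Java RecordWriter
+     RunningJob job = Submitter.submitJob(conf);   // conf is MODIFIED for pipes
+     System.out.println("Submitted " + job.getID());
+   }
+ }
+ -->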
+</package>
+<package name="org.apache.hadoop.metrics">
+ <!-- start class org.apache.hadoop.metrics.ContextFactory -->
+ <class name="ContextFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ContextFactory"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of ContextFactory]]>
+ </doc>
+ </constructor>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the named attribute, or null if there is no
+ attribute of that name.
+
+ @param attributeName the attribute name
+ @return the attribute value]]>
+ </doc>
+ </method>
+ <method name="getAttributeNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all the factory's attributes.
+
+ @return the attribute names]]>
+ </doc>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Sets the named factory attribute to the specified value, creating it
+ if it did not already exist. If the value is null, this is the same as
+ calling removeAttribute.
+
+ @param attributeName the attribute name
+ @param value the new attribute value]]>
+ </doc>
+ </method>
+ <method name="removeAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes the named attribute if it exists.
+
+ @param attributeName the attribute name]]>
+ </doc>
+ </method>
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <exception name="InstantiationException" type="java.lang.InstantiationException"/>
+ <exception name="IllegalAccessException" type="java.lang.IllegalAccessException"/>
+ <doc>
+ <![CDATA[Returns the named MetricsContext instance, constructing it if necessary
+ using the factory's current configuration attributes. <p/>
+
+ When constructing the instance, if the factory property
+ <code><i>contextName</i>.class</code> exists,
+ its value is taken to be the name of the class to instantiate. Otherwise,
+ the default is to create an instance of
+ <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a
+ dummy "no-op" context which will cause all metric data to be discarded.
+
+ @param contextName the name of the context
+ @return the named MetricsContext]]>
+ </doc>
+ </method>
+ <method name="getNullContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a "null" context - one which does nothing.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the singleton ContextFactory instance, constructing it if
+ necessary. <p/>
+
+ When the instance is constructed, this method checks if the file
+ <code>hadoop-metrics.properties</code> exists on the class path. If it
+ exists, it must be in the format defined by java.util.Properties, and all
+ the properties in the file are set as attributes on the newly created
+ ContextFactory instance.
+
+ @return the singleton ContextFactory instance]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factory class for creating MetricsContext objects. To obtain an instance
+ of this class, use the static <code>getFactory()</code> method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ContextFactory -->
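+ <!-- Editor's example (hedged): obtaining the singleton factory and a named
+      context. The context name "myContextName" is hypothetical; in normal use
+      the attributes come from hadoop-metrics.properties on the class path
+      rather than being set programmatically.
+
+ import org.apache.hadoop.metrics.ContextFactory;
+ import org.apache.hadoop.metrics.MetricsContext;
+
+ public class FactoryDemo {
+   public static void main(String[] args) throws Exception {
+     ContextFactory factory = ContextFactory.getFactory();
+     // Assumption: select FileContext for this context name, following the
+     // contextName.class convention documented in getContext() above.
+     factory.setAttribute("myContextName.class",
+                          "org.apache.hadoop.metrics.file.FileContext");
+     MetricsContext context = factory.getContext("myContextName");
+     context.startMonitoring();
+   }
+ }
+ -->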
+ <!-- start interface org.apache.hadoop.metrics.MetricsContext -->
+ <interface name="MetricsContext" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.
+
+ @return the context name]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, i.e. the emitting of metrics records as
+ they are updated.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free any data that the implementation
+ may have buffered for sending at the next timer event. It
+ is OK to call <code>startMonitoring()</code> again after calling
+ this.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and also frees any buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new MetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at regular time intervals, as
+ determined by the implementation-class specific configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records and then return]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_PERIOD" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default period in seconds at which data is sent to the metrics system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The main interface to the metrics package.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsContext -->
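+ <!-- Editor's example (hedged): the context lifecycle implied by the methods
+      above. The context name is made up, and the anonymous Updater body is a
+      placeholder.
+
+ import org.apache.hadoop.metrics.ContextFactory;
+ import org.apache.hadoop.metrics.MetricsContext;
+ import org.apache.hadoop.metrics.Updater;
+
+ public class ContextLifecycleDemo {
+   public static void main(String[] args) throws Exception {
+     MetricsContext context = ContextFactory.getFactory().getContext("myContextName");
+     Updater updater = new Updater() {
+       public void doUpdates(MetricsContext ctx) {
+         // fill in metrics records here once per timer period
+       }
+     };
+     context.registerUpdater(updater);  // invoked at each configured interval
+     context.startMonitoring();         // begin emitting records
+     // ... on shutdown ...
+     context.unregisterUpdater(updater);
+     context.close();                   // stops monitoring and frees buffered data
+   }
+ }
+ -->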
+ <!-- start class org.apache.hadoop.metrics.MetricsException -->
+ <class name="MetricsException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException
+
+ @param message an error message]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[General-purpose, unchecked metrics exception.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsException -->
+ <!-- start interface org.apache.hadoop.metrics.MetricsRecord -->
+ <interface name="MetricsRecord" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value. The tagValue may be null,
+ which is treated the same as an empty String.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.
+
+ @param tagName name of a tag]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes, from the buffered data table, all rows having tags
+ that equal the tags that have been set on this record. For example,
+ if there are no tags on this record, all rows for this record name
+ would be removed. Or, if there is a single tag on this record, then
+ just rows containing a tag with the same name and value would be removed.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A named and optionally tagged set of records to be sent to the metrics
+ system. <p/>
+
+ A record name identifies the kind of data to be reported. For example, a
+ program reporting statistics relating to the disks on a computer might use
+ a record name "diskStats".<p/>
+
+ A record has zero or more <i>tags</i>. A tag has a name and a value. To
+ continue the example, the "diskStats" record might use a tag named
+ "diskName" to identify a particular disk. Sometimes it is useful to have
+ more than one tag, so there might also be a "diskType" with value "ide" or
+ "scsi" or whatever.<p/>
+
+ A record also has zero or more <i>metrics</i>. These are the named
+ values that are to be reported to the metrics system. In the "diskStats"
+ example, possible metric names would be "diskPercentFull", "diskPercentBusy",
+ "kbReadPerSecond", etc.<p/>
+
+ The general procedure for using a MetricsRecord is to fill in its tag and
+ metric values, and then call <code>update()</code> to pass the record to the
+ client library.
+ Metric data is not immediately sent to the metrics system
+ each time that <code>update()</code> is called.
+ An internal table is maintained, identified by the record name. This
+ table has columns
+ corresponding to the tag and the metric names, and rows
+ corresponding to each unique set of tag values. An update
+ either modifies an existing row in the table, or adds a new row with a set of
+ tag values that are different from all the other rows. Note that if there
+ are no tags, then there can be at most one row in the table. <p/>
+
+ Once a row is added to the table, its data will be sent to the metrics system
+ on every timer period, whether or not it has been updated since the previous
+ timer period. If this is inappropriate, for example if metrics were being
+ reported by some transient object in an application, the <code>remove()</code>
+ method can be used to remove the row and thus stop the data from being
+ sent.<p/>
+
+ Note that the <code>update()</code> method is atomic. This means that it is
+ safe for different threads to be updating the same metric. More precisely,
+ it is OK for different threads to call <code>update()</code> on MetricsRecord instances
+ with the same set of tag names and tag values. Different threads should
+ <b>not</b> use the same MetricsRecord instance at the same time.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsRecord -->
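+ <!-- Editor's example (hedged): the fill-then-update() procedure described
+      above, reusing the "diskStats" example from the text. This snippet is
+      assumed to run inside an Updater.doUpdates(MetricsContext context)
+      callback; the tag and metric values are made up.
+
+ MetricsRecord record = context.createRecord("diskStats");
+ record.setTag("diskName", "sda");          // tags identify the row
+ record.setMetric("diskPercentFull", 87);
+ record.incrMetric("kbReadPerSecond", 120);
+ record.update();                           // buffers the row; sent each timer period
+ // When the disk should no longer be reported, drop its row:
+ // record.remove();
+ -->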
+ <!-- start class org.apache.hadoop.metrics.MetricsUtil -->
+ <class name="MetricsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to return the named context.
+ If the desired context cannot be created for any reason, the exception
+ is logged, and a null context is returned.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to create and return a new metrics record instance within the
+ given context. This record is tagged with the host name.
+
+ @param context the context
+ @param recordName name of the record
+ @return newly created metrics record]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility class to simplify creation and reporting of hadoop metrics.
+
+ For examples of usage, see {@link org.apache.hadoop.dfs.DataNode}.
+ @see org.apache.hadoop.metrics.MetricsRecord
+ @see org.apache.hadoop.metrics.MetricsContext
+ @see org.apache.hadoop.metrics.ContextFactory]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsUtil -->
+ <!-- start interface org.apache.hadoop.metrics.Updater -->
+ <interface name="Updater" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Timer-based call-back from the metric library.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Call-back interface. See <code>MetricsContext.registerUpdater()</code>.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.Updater -->
+</package>
+<package name="org.apache.hadoop.metrics.file">
+ <!-- start class org.apache.hadoop.metrics.file.FileContext -->
+ <class name="FileContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of FileContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="getFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the configured file name, or null.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring by opening, in append mode, the
+ file specified by the <code>fileName</code> attribute,
+ if specified. Otherwise the data will be written to standard
+ output.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring, closing the file.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Emits a metrics record to a file.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Flushes the output writer, forcing updates to disk.]]>
+ </doc>
+ </method>
+ <field name="FILE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="PERIOD_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Metrics context for writing metrics to a file.<p/>
+
+ This class is configured by setting ContextFactory attributes which in turn
+ are usually configured through a properties file. All the attributes are
+ prefixed by the contextName. For example, the properties file might contain:
+ <pre>
+ myContextName.fileName=/tmp/metrics.log
+ myContextName.period=5
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.file.FileContext -->
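+ <!-- Editor's note (hedged): a fuller hadoop-metrics.properties sketch for
+      FileContext. The context name "dfs" is hypothetical; the .class line
+      follows the contextName.class convention documented under
+      ContextFactory.getContext() earlier in this file.
+
+ dfs.class=org.apache.hadoop.metrics.file.FileContext
+ dfs.fileName=/tmp/dfsmetrics.log
+ dfs.period=10
+ -->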
+</package>
+<package name="org.apache.hadoop.metrics.ganglia">
+ <!-- start class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+ <class name="GangliaContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GangliaContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of GangliaContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Context for sending metrics to Ganglia.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+</package>
+<package name="org.apache.hadoop.metrics.jvm">
+ <!-- start class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <class name="EventCounter" extends="org.apache.log4j.AppenderSkeleton"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="EventCounter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getFatal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getError" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWarn" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfo" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="requiresLayout" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A log4j Appender that simply counts logging events at four levels:
+ fatal, error, warn and info.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.EventCounter -->
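+ <!-- Editor's note (hedged): a log4j.properties sketch showing how an
+      appender such as EventCounter is typically attached, plus reading the
+      counters from Java. The logger wiring is an assumption, not taken from
+      this file.
+
+ log4j.rootLogger=INFO,console,EventCounter
+ log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter
+
+ Reading the counters later, e.g. from a metrics Updater:
+ long errors = EventCounter.getError();
+ long warnings = EventCounter.getWarn();
+ -->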
+ <!-- start class org.apache.hadoop.metrics.jvm.JvmMetrics -->
+ <class name="JvmMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="init" return="org.apache.hadoop.metrics.jvm.JvmMetrics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="processName" type="java.lang.String"/>
+ <param name="sessionId" type="java.lang.String"/>
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[This will be called periodically (with the period being configuration
+ dependent).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Singleton class which reports Java Virtual Machine metrics to the metrics API.
+ Any application can create an instance of this class in order to emit
+ Java VM metrics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.JvmMetrics -->
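+ <!-- Editor's example (hedged): enabling JVM metrics for a process. The
+      process name and session id strings are hypothetical.
+
+ import org.apache.hadoop.metrics.jvm.JvmMetrics;
+
+ // Typically called once at daemon startup; thereafter doUpdates() is
+ // driven periodically by the metrics system.
+ JvmMetrics.init("MyDaemon", "session-1");
+ -->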
+</package>
+<package name="org.apache.hadoop.metrics.spi">
+ <!-- start class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
+ <class name="AbstractMetricsContext" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsContext"/>
+ <constructor name="AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of AbstractMetricsContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ <doc>
+ <![CDATA[Initializes the context.]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for subclasses to access factory attributes.]]>
+ </doc>
+ </method>
+ <method name="getAttributeTable" return="java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="tableName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an attribute-value map derived from the factory attributes
+ by finding all factory attributes that begin with
+ <i>contextName</i>.<i>tableName</i>. The returned map consists of
+ those attributes with the contextName and tableName stripped off.]]>
+ </doc>
+ </method>
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.]]>
+ </doc>
+ </method>
+ <method name="getContextFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the factory by which this context was created.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, i.e. the emitting of metrics records.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free buffered data.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and frees buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new MetricsRecordImpl instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="newRecord" return="org.apache.hadoop.metrics.spi.MetricsRecordImpl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Subclasses should override this if they subclass MetricsRecordImpl.
+ @param recordName the name of the record
+ @return newly created instance of MetricsRecordImpl or subclass]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at time intervals determined by
+ the configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sends a record to the metrics system.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called each period after all records have been emitted, this method does nothing.
+ Subclasses may override it in order to perform some kind of flush.]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.update(). Creates or updates a row in
+ the internal table of metric data.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.remove(). Removes all matching rows in
+ the internal table of metric data. A row matches if it has the same
+ tag names and values as record, but it may also have additional
+ tags.]]>
+ </doc>
+ </method>
+ <method name="getPeriod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the timer period.]]>
+ </doc>
+ </method>
+ <method name="setPeriod"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="period" type="int"/>
+ <doc>
+ <![CDATA[Sets the timer period]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The main class of the Service Provider Interface. This class should be
+ extended in order to integrate the Metrics API with a specific metrics
+ client library. <p/>
+
+ This class implements the internal table of metric data, and the timer
+ on which data is to be sent to the metrics system. Subclasses must
+ override the abstract <code>emitRecord</code> method in order to transmit
+ the data. <p/>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
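+ <!-- Example: a minimal sketch of extending AbstractMetricsContext, as the class
+      doc above describes. Only emitRecord must be overridden; buffering, the
+      timer, and updater handling come from the base class. "LogMetricsContext"
+      is a hypothetical name for illustration, not part of the API.
+
+      import java.io.IOException;
+      import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
+      import org.apache.hadoop.metrics.spi.OutputRecord;
+
+      public class LogMetricsContext extends AbstractMetricsContext {
+        @Override
+        protected void emitRecord(String contextName, String recordName,
+                                  OutputRecord outRec) throws IOException {
+          // Print each buffered row; a real context would send it to a backend.
+          System.out.println(contextName + "." + recordName + ": "
+              + outRec.getMetricNames());
+        }
+      }
+ -->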
+ <!-- start class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
+ <class name="MetricsRecordImpl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsRecord"/>
+ <constructor name="MetricsRecordImpl" type="java.lang.String, org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsRecordImpl]]>
+ </doc>
+ </constructor>
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes the row, if it exists, in the buffered data table having tags
+ that equal the tags that have been set on this record.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of MetricsRecord. Keeps a back-pointer to the context
+ from which it was created, and delegates back to it on <code>update()</code>
+ and <code>remove()</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
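+ <!-- Example: a minimal sketch of using a MetricsRecord obtained from a context.
+      "ctx" stands for some AbstractMetricsContext subclass instance, and the
+      record, tag, and metric names are illustrative.
+
+      import org.apache.hadoop.metrics.MetricsRecord;
+
+      MetricsRecord rec = ctx.createRecord("requests");
+      rec.setTag("hostName", "node17");   // tags identify the row
+      rec.setMetric("numRequests", 5);    // absolute value
+      rec.incrMetric("numErrors", 1);     // incremental value
+      rec.update();                       // buffer the row for the next emit period
+ -->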
+ <!-- start class org.apache.hadoop.metrics.spi.MetricValue -->
+ <class name="MetricValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricValue" type="java.lang.Number, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricValue]]>
+ </doc>
+ </constructor>
+ <method name="isIncrement" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumber" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="ABSOLUTE" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCREMENT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Number that is either an absolute or an incremental amount.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricValue -->
+ <!-- start class org.apache.hadoop.metrics.spi.NullContext -->
+ <class name="NullContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContext]]>
+ </doc>
+ </constructor>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do-nothing version of startMonitoring]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Null metrics context: a metrics context which does nothing. Used as the
+ default context, so that no performance data is emitted if no configuration
+ data is found.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContext -->
+ <!-- start class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <class name="NullContextWithUpdateThread" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContextWithUpdateThread"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContextWithUpdateThread]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A null context which has a thread that runs the registered updaters
+ periodically when monitoring is started. This keeps the data sampled
+ correctly.
+ In all other respects, this is like the null context: no data is emitted.
+ This is suitable for monitoring systems like JMX, which read the metrics
+ only when someone queries them.
+
+ The default implementations of start and stop monitoring
+ in AbstractMetricsContext are good enough.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <!-- start class org.apache.hadoop.metrics.spi.OutputRecord -->
+ <class name="OutputRecord" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTagNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of tag names]]>
+ </doc>
+ </method>
+ <method name="getTag" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a tag object, which can be a String, Integer, Short or Byte.
+
+ @return the tag value, or null if there is no such tag]]>
+ </doc>
+ </method>
+ <method name="getMetricNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of metric names.]]>
+ </doc>
+ </method>
+ <method name="getMetric" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the metric object which can be a Float, Integer, Short or Byte.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents a record of metric data to be sent to a metrics system.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.OutputRecord -->
+ <!-- start class org.apache.hadoop.metrics.spi.Util -->
+ <class name="Util" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="parse" return="java.util.List&lt;java.net.InetSocketAddress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="specs" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Parses a space and/or comma separated sequence of server specifications
+ of the form <i>hostname</i> or <i>hostname:port</i>. If
+ the specs string is null, defaults to localhost:defaultPort.
+
+ @return a list of InetSocketAddress objects.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Static utility methods]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.Util -->
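+ <!-- Example: a sketch of Util.parse on a spec string (host names illustrative).
+
+      import java.net.InetSocketAddress;
+      import java.util.List;
+      import org.apache.hadoop.metrics.spi.Util;
+
+      List<InetSocketAddress> addrs = Util.parse("mon1:8649, mon2", 8649);
+      // yields [mon1:8649, mon2:8649]; a null spec yields [localhost:8649]
+ -->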
+</package>
+<package name="org.apache.hadoop.metrics.util">
+ <!-- start class org.apache.hadoop.metrics.util.MBeanUtil -->
+ <class name="MBeanUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MBeanUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="registerMBean" return="javax.management.ObjectName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="serviceName" type="java.lang.String"/>
+ <param name="nameName" type="java.lang.String"/>
+ <param name="theMbean" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Register the MBean using our standard MBeanName format
+ "hadoop.dfs:service=<serviceName>,name=<nameName>",
+ where <serviceName> and <nameName> are the supplied parameters.
+
+ @param serviceName
+ @param nameName
+ @param theMbean - the MBean to register
+ @return the name used to register the MBean]]>
+ </doc>
+ </method>
+ <method name="unregisterMBean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mbeanName" type="javax.management.ObjectName"/>
+ </method>
+ <doc>
+ <![CDATA[This util class provides a method to register an MBean using
+ our standard naming convention as described in the doc
+ for {@link #registerMBean(String, String, Object)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MBeanUtil -->
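+ <!-- Example: a sketch of registering and later unregistering an MBean; the
+      service/name strings and the bean object "myBean" are illustrative.
+
+      import javax.management.ObjectName;
+      import org.apache.hadoop.metrics.util.MBeanUtil;
+
+      ObjectName beanName = MBeanUtil.registerMBean("DataNode", "MyStats", myBean);
+      // ... on shutdown:
+      MBeanUtil.unregisterMBean(beanName);
+ -->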
+ <!-- start class org.apache.hadoop.metrics.util.MetricsIntValue -->
+ <class name="MetricsIntValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsIntValue" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="int"/>
+ <doc>
+ <![CDATA[Set the value.
+ @param newValue the new value]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+ <![CDATA[Increment the metric by the given value.
+ @param incr - value to be added]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decr" type="int"/>
+ <doc>
+ <![CDATA[Decrement the metric by the given value.
+ @param decr - value to subtract]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Dec metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the metrics record <code>mr</code>.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsIntValue class is for a metric that does not vary over time
+ but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsIntValue -->
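+ <!-- Example: a sketch of MetricsIntValue; the metric name and the surrounding
+      updater callback are illustrative.
+
+      import org.apache.hadoop.metrics.util.MetricsIntValue;
+
+      MetricsIntValue queueLength = new MetricsIntValue("queueLength");
+      queueLength.set(42);      // published once at the next update call
+      // inside an Updater.doUpdates() callback:
+      //   queueLength.pushMetric(metricsRecord);  // pushed only if changed
+ -->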
+ <!-- start class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <class name="MetricsLongValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsLongValue" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="long"/>
+ <doc>
+ <![CDATA[Set the value.
+ @param newValue the new value]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Increment the metric by the given value.
+ @param incr - value to be added]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decr" type="long"/>
+ <doc>
+ <![CDATA[Decrement the metric by the given value.
+ @param decr - value to subtract]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Dec metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the metrics record <code>mr</code>.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsLongValue class is for a metric that does not vary over time
+ but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
+ <class name="MetricsTimeVaryingInt" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingInt" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+ <![CDATA[Increment the metric by the given value.
+ @param incr - number of operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the metrics record <code>mr</code>.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #previousIntervalValue}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value at the previous interval.
+ @return prev interval value]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingInt class is for a metric that naturally
+ varies over time (e.g. number of files created).
+ The metric is published at each interval heartbeat (the interval
+ is set in the metrics config file).
+ Note if one wants a time associated with the metric then use
+ @see org.apache.hadoop.metrics.util.MetricsTimeVaryingRate]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
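+ <!-- Example: a sketch of MetricsTimeVaryingInt counting operations
+      (the metric name is illustrative).
+
+      import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
+
+      MetricsTimeVaryingInt filesCreated = new MetricsTimeVaryingInt("filesCreated");
+      filesCreated.inc();     // one operation
+      filesCreated.inc(3);    // three more
+      // pushMetric(mr) later sends the delta accumulated since the last interval
+ -->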
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
+ <class name="MetricsTimeVaryingRate" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingRate" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param n the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numOps" type="int"/>
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for numOps operations
+ @param numOps - number of operations
+ @param time - time for numOps operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for one operation
+ @param time for one operation]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the metrics record <code>mr</code>.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and
+ {@link #getPreviousIntervalNumOps()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalNumOps" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of operations in the previous interval
+ @return - ops in prev interval]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalAverageTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The average time per operation in the previous interval
+ @return - the average time.]]>
+ </doc>
+ </method>
+ <method name="getMinTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The min time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return min time for an operation]]>
+ </doc>
+ </method>
+ <method name="getMaxTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The max time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return max time for an operation]]>
+ </doc>
+ </method>
+ <method name="resetMinMax"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the min max values]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingRate class is for a rate based metric that
+ naturally varies over time (e.g. time taken to create a file).
+ The rate is averaged at each interval heartbeat (the interval
+ is set in the metrics config file).
+ This class also keeps track of the min and max rates along with
+ a method to reset the min-max.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
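+ <!-- Example: a sketch of timing operations with MetricsTimeVaryingRate
+      (the metric name and the timed operation "createFile" are illustrative).
+
+      import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+
+      MetricsTimeVaryingRate createTime = new MetricsTimeVaryingRate("createFile");
+      long start = System.currentTimeMillis();
+      createFile();                                       // hypothetical operation
+      createTime.inc(System.currentTimeMillis() - start); // one op and its elapsed time
+ -->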
+</package>
+<package name="org.apache.hadoop.net">
+ <!-- start class org.apache.hadoop.net.DNS -->
+ <class name="DNS" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DNS"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reverseDns" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hostIp" type="java.net.InetAddress"/>
+ <param name="ns" type="java.lang.String"/>
+ <exception name="NamingException" type="javax.naming.NamingException"/>
+ <doc>
+ <![CDATA[Returns the hostname associated with the specified IP address by the
+ provided nameserver.
+
+ @param hostIp
+ The address to reverse lookup
+ @param ns
+ The host name of a reachable DNS server
+ @return The host name associated with the provided IP
+ @throws NamingException
+ If a NamingException is encountered]]>
+ </doc>
+ </method>
+ <method name="getIPs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the IPs associated with the provided interface, if any, in
+ textual form.
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return A string vector of all the IPs associated with the provided
+ interface
+ @throws UnknownHostException
+ If an UnknownHostException is encountered in querying the
+ default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultIP" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the first available IP address associated with the provided
+ network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The IP address in text form
+ @throws UnknownHostException
+ If one is encountered in querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the provided nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return A string vector of all host names associated with the IPs tied to
+ the specified interface
+ @throws UnknownHostException]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the default nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The list of host names associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the provided
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the default
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides direct and reverse lookup functionalities, allowing
+ the querying of specific network interfaces or nameservers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.DNS -->
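+ <!-- Example: a sketch of DNS lookups (interface and nameserver values are
+      illustrative; these calls throw UnknownHostException, which a caller
+      would need to handle).
+
+      import org.apache.hadoop.net.DNS;
+
+      String host  = DNS.getDefaultHost("eth0");   // first host name on eth0
+      String[] ips = DNS.getIPs("eth0");           // all IPs bound to eth0
+      String name  = DNS.getDefaultHost("eth0", "ns.example.com"); // explicit nameserver
+ -->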
+ <!-- start interface org.apache.hadoop.net.DNSToSwitchMapping -->
+ <interface name="DNSToSwitchMapping" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[Resolves a list of DNS-names/IP-addresses and returns a list of
+ switch information (network paths). One-to-one correspondence must be
+ maintained between the elements in the lists.
+ Consider an element in the argument list - x.y.com. The switch information
+ that is returned must be a network path of the form /foo/rack,
+ where / is the root, and 'foo' is the switch where 'rack' is connected.
+ Note the hostname/ip-address is not part of the returned path.
+ The network topology of the cluster would determine the number of
+ components in the network path.
+ @param names
+ @return list of resolved network paths]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An interface that should be implemented to allow pluggable
+ DNS-name/IP-address to RackID resolvers.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.DNSToSwitchMapping -->
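+ <!-- Example: a minimal sketch of a DNSToSwitchMapping implementation that maps
+      every host to a single rack (the class name and path are illustrative).
+
+      import java.util.ArrayList;
+      import java.util.List;
+      import org.apache.hadoop.net.DNSToSwitchMapping;
+
+      public class SingleRackMapping implements DNSToSwitchMapping {
+        public List<String> resolve(List<String> names) {
+          // One output path per input name, in the same order.
+          List<String> paths = new ArrayList<String>(names.size());
+          for (int i = 0; i < names.size(); i++) {
+            paths.add("/switch0/rack0");
+          }
+          return paths;
+        }
+      }
+ -->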
+ <!-- start class org.apache.hadoop.net.NetUtils -->
+ <class name="NetUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="clazz" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the socket factory for the given class according to its
+ configuration parameter
+ <tt>hadoop.rpc.socket.factory.class.&lt;ClassName&gt;</tt>. When no
+ such parameter exists then fall back on the default socket factory as
+ configured by <tt>hadoop.rpc.socket.factory.class.default</tt>. If
+ this default socket factory is not configured, then fall back on the JVM
+ default socket factory.
+
+ @param conf the configuration
+ @param clazz the class (usually a {@link VersionedProtocol})
+ @return a socket factory]]>
+ </doc>
+ </method>
+ <method name="getDefaultSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default socket factory as specified by the configuration
+ parameter <tt>hadoop.rpc.socket.factory.class.default</tt>.
+
+ @param conf the configuration
+ @return the default socket factory as specified in the configuration or
+ the JVM default socket factory if the configuration does not
+ contain a default socket factory property.]]>
+ </doc>
+ </method>
+ <method name="getSocketFactoryFromProperty" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="propValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the socket factory corresponding to the given proxy URI. If the
+ given proxy URI corresponds to an absence of configuration parameter,
+ returns null. If the URI is malformed, an exception is raised.
+
+ @param propValue the property which is the class name of the
+ SocketFactory to instantiate; assumed non null and non empty.
+ @return a socket factory as defined in the property value.]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="getServerAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="oldBindAddressName" type="java.lang.String"/>
+ <param name="oldPortName" type="java.lang.String"/>
+ <param name="newBindAddressName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Handle the transition from pairs of attributes specifying a host and port
+ to a single colon separated one.
+ @param conf the configuration to check
+ @param oldBindAddressName the old address attribute name
+ @param oldPortName the old port attribute name
+ @param newBindAddressName the new combined name
+ @return the complete address from the configuration]]>
+ </doc>
+ </method>
+ <method name="addStaticResolution"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="resolvedName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a static resolution for host. This can be used for setting up
+ fake hostnames that point to a well known host. For example,
+ some testcases need daemons with different hostnames
+ running on the same machine. In order to create connections to these
+ daemons, one can set up mappings from those hostnames to "localhost".
+ {@link NetUtils#getStaticResolution(String)} can be used to query for
+ the actual hostname.
+ @param host
+ @param resolvedName]]>
+ </doc>
+ </method>
+ <method name="getStaticResolution" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Retrieves the resolved name for the passed host. The resolved name must
+ have been set earlier using
+ {@link NetUtils#addStaticResolution(String, String)}
+ @param host
+ @return the resolution]]>
+ </doc>
+ </method>
+ <method name="getAllStaticResolutions" return="java.util.List&lt;java.lang.String[]&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is used to get all the resolutions that were added using
+ {@link NetUtils#addStaticResolution(String, String)}. The return
+ value is a List, each element of which is an array of String
+ of the form String[0]=hostname, String[1]=resolved-hostname
+ @return the list of resolutions]]>
+ </doc>
+ </method>
+ <method name="getConnectAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="server" type="org.apache.hadoop.ipc.Server"/>
+ <doc>
+ <![CDATA[Returns InetSocketAddress that a client can use to
+ connect to the server. Server.getListenerAddress() is not correct when
+ the server binds to "0.0.0.0". This returns "127.0.0.1:port" when
+ getListenerAddress() returns "0.0.0.0:port".
+
+ @param server
+ @return socket address that a client can use to connect to the server.]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getInputStream(socket, socket.getSoTimeout()).<br><br>
+
+ From documentation for {@link #getInputStream(Socket, long)}:<br>
+ Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see #getInputStream(Socket, long)
+
+ @param socket
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. zero
+ for waiting as long as necessary.
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getOutputStream(socket, 0). A timeout of zero implies the write
+ will wait until the data is written.<br><br>
+
+ From documentation for {@link #getOutputStream(Socket, long)} : <br>
+ Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ the data is written.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getOutputStream()}.
+
+ @see #getOutputStream(Socket, long)
+
+ @param socket
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ the data is written.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getOutputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. zero
+ for waiting as long as necessary.
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetUtils -->
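+ <!-- Example: a sketch of common NetUtils calls (host and port values are
+      illustrative, and "socket" stands for an already-connected java.net.Socket).
+
+      import java.io.InputStream;
+      import java.io.OutputStream;
+      import java.net.InetSocketAddress;
+      import org.apache.hadoop.net.NetUtils;
+
+      InetSocketAddress a = NetUtils.createSocketAddr("namenode.example.com:8020");
+      InetSocketAddress b = NetUtils.createSocketAddr("namenode.example.com", 8020);
+      // for sockets obtained via NetUtils socket factories:
+      InputStream in   = NetUtils.getInputStream(socket);          // SO_TIMEOUT applies
+      OutputStream out = NetUtils.getOutputStream(socket, 10000);  // 10 s write timeout
+ -->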
+ <!-- start class org.apache.hadoop.net.NetworkTopology -->
+ <class name="NetworkTopology" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetworkTopology"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Add a leaf node.
+ Update node counter & rack counter if necessary.
+ @param node
+ node to be added
+ @exception IllegalArgumentException if the node is added under a leaf,
+ or the node to be added is not a leaf]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Remove a node.
+ Update node counter & rack counter if necessary.
+ @param node
+ node to be removed]]>
+ </doc>
+ </method>
+ <method name="contains" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if the tree contains node <i>node</i>
+
+ @param node
+ a node
+ @return true if <i>node</i> is already in the tree; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="loc" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a string representation of a node, return its reference
+
+ @param loc
+ a path-like string representation of a node
+ @return a reference to the node; null if the node is not in the tree]]>
+ </doc>
+ </method>
+ <method name="getNumOfRacks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of racks]]>
+ </doc>
+ </method>
+ <method name="getNumOfLeaves" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of nodes]]>
+ </doc>
+ </method>
+ <method name="getDistance" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return the distance between two nodes.
+ It is assumed that the distance from one node to its parent is 1.
+ The distance between two nodes is calculated by summing up their distances
+ to their closest common ancestor.
+ @param node1 one node
+ @param node2 another node
+ @return the distance between node1 and node2
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="isOnSameRack" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if two nodes are on the same rack
+ @param node1 one node
+ @param node2 another node
+ @return true if node1 and node2 are on the same rack; false otherwise
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="chooseRandom" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Randomly choose one node from <i>scope</i>.
+ If scope starts with ~, choose one from all nodes except for the
+ ones in <i>scope</i>; otherwise, choose one from <i>scope</i>
+ @param scope range of nodes from which a node will be chosen
+ @return the chosen node]]>
+ </doc>
+ </method>
+ <method name="countNumOfAvailableNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <param name="excludedNodes" type="java.util.List&lt;org.apache.hadoop.net.Node&gt;"/>
+ <doc>
+ <![CDATA[Return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i>.
+ If scope starts with ~, return the number of nodes that are not
+ in <i>scope</i> and not in <i>excludedNodes</i>.
+ @param scope a path string that may start with ~
+ @param excludedNodes a list of nodes
+ @return number of available nodes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert the network tree to a string]]>
+ </doc>
+ </method>
+ <method name="pseudoSortByDistance"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reader" type="org.apache.hadoop.net.Node"/>
+ <param name="nodes" type="org.apache.hadoop.net.Node[]"/>
+ <doc>
+ <![CDATA[Sort the nodes array by their distances to <i>reader</i>.
+ It linearly scans the array; if a local node is found, it is swapped with
+ the first element of the array.
+ If a local rack node is found, it is swapped with the first element following
+ the local node.
+ If neither a local node nor a local rack node is found, a random replica
+ location is put at position 0.
+ It leaves the rest of the nodes untouched.]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_RACK" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UNRESOLVED" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_HOST_LEVEL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The class represents a cluster of computers with a tree hierarchical
+ network topology.
+ For example, a cluster may consist of many data centers filled
+ with racks of computers.
+ In a network topology, leaves represent data nodes (computers) and inner
+ nodes represent switches/routers that manage traffic in/out of data centers
+ or racks.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetworkTopology -->
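+ <!-- Editorial usage sketch (not produced by javadoc): building a small
+      NetworkTopology from NodeBase leaves and querying it. Host names and
+      rack paths below are hypothetical.
+
+      import org.apache.hadoop.net.NetworkTopology;
+      import org.apache.hadoop.net.Node;
+      import org.apache.hadoop.net.NodeBase;
+
+      public class TopologyExample {
+        public static void main(String[] args) {
+          NetworkTopology cluster = new NetworkTopology();
+          Node n1 = new NodeBase("h1:50010", "/dc1/rack1");
+          Node n2 = new NodeBase("h2:50010", "/dc1/rack1");
+          Node n3 = new NodeBase("h3:50010", "/dc1/rack2");
+          cluster.add(n1);
+          cluster.add(n2);
+          cluster.add(n3);
+          System.out.println(cluster.contains(n1));         // true
+          System.out.println(cluster.getNumOfRacks());      // 2
+          System.out.println(cluster.getDistance(n1, n2));  // 2: same rack
+          System.out.println(cluster.getDistance(n1, n3));  // 4: different racks
+          System.out.println(cluster.isOnSameRack(n1, n2)); // true
+          Node pick = cluster.chooseRandom("~/dc1/rack1");  // any node outside rack1
+          System.out.println(pick.getName());
+        }
+      }
+ -->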
+ <!-- start interface org.apache.hadoop.net.Node -->
+ <interface name="Node" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the string representation of this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the node's network location]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface defines a node in a network topology.
+ A node may be a leaf representing a data node or an inner
+ node representing a data center or rack.
+ Each node has a name, and its location in the network is
+ given by a string with syntax similar to a file name.
+ For example, a data node's name is hostname:port# and if it's located at
+ rack "orange" in data center "dog", the string representation of its
+ network location is /dog/orange]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.Node -->
+ <!-- start class org.apache.hadoop.net.NodeBase -->
+ <class name="NodeBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <constructor name="NodeBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its path
+ @param path
+ a concatenation of this node's location, the path separator, and its name]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String, org.apache.hadoop.net.Node, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location
+ @param parent this node's parent node
+ @param level this node's level in the tree]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set this node's network location]]>
+ </doc>
+ </method>
+ <method name="getPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return this node's path]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's string representation]]>
+ </doc>
+ </method>
+ <method name="normalize" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Normalize a path]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree]]>
+ </doc>
+ </method>
+ <field name="PATH_SEPARATOR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PATH_SEPARATOR_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ROOT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="location" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="level" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="parent" type="org.apache.hadoop.net.Node"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class that implements interface Node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NodeBase -->
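+ <!-- Editorial usage sketch (not produced by javadoc): the two common ways
+      to construct a NodeBase, plus the static path helpers. Paths are
+      hypothetical.
+
+      import org.apache.hadoop.net.Node;
+      import org.apache.hadoop.net.NodeBase;
+
+      public class NodeBaseExample {
+        public static void main(String[] args) {
+          Node a = new NodeBase("/dc1/rack1/h1:50010");    // location + separator + name
+          Node b = new NodeBase("h1:50010", "/dc1/rack1"); // equivalent two-argument form
+          System.out.println(a.getName());                 // h1:50010
+          System.out.println(a.getNetworkLocation());      // /dc1/rack1
+          System.out.println(NodeBase.getPath(b));         // /dc1/rack1/h1:50010
+          System.out.println(NodeBase.normalize("/dc1/rack1/")); // trailing separator removed
+        }
+      }
+ -->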
+ <!-- start class org.apache.hadoop.net.ScriptBasedMapping -->
+ <class name="ScriptBasedMapping" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.net.DNSToSwitchMapping"/>
+ <constructor name="ScriptBasedMapping"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[This class implements the {@link DNSToSwitchMapping} interface using a
+ script configured via topology.script.file.name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.ScriptBasedMapping -->
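+ <!-- Editorial usage sketch (not produced by javadoc): resolving host names
+      to rack paths through a site-provided topology script. The script path
+      is hypothetical; the script is expected to print one network location
+      (e.g. /dc1/rack1) per input name.
+
+      import java.util.Arrays;
+      import java.util.List;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.net.ScriptBasedMapping;
+
+      public class RackResolveExample {
+        public static void main(String[] args) {
+          Configuration conf = new Configuration();
+          conf.set("topology.script.file.name", "/etc/hadoop/topology.sh");
+          ScriptBasedMapping mapping = new ScriptBasedMapping();
+          mapping.setConf(conf);
+          List<String> racks =
+              mapping.resolve(Arrays.asList("h1.example.com", "h2.example.com"));
+          System.out.println(racks); // i-th entry is the rack of the i-th host
+        }
+      }
+ -->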
+ <!-- start class org.apache.hadoop.net.SocketInputStream -->
+ <class name="SocketInputStream" extends="java.io.InputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.ReadableByteChannel"/>
+ <constructor name="SocketInputStream" type="java.nio.channels.ReadableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for reading, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds; must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), timeout): <br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds; must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), socket.getSoTimeout())
+ :<br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.ReadableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this input stream.
+ This is useful in certain cases like channel for
+ {@link FileChannel#transferFrom(ReadableByteChannel, long, long)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForReadable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for reading.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an input stream that can have a timeout while reading.
+ This sets non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} for the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketOutputStream} for writing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketInputStream -->
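+ <!-- Editorial usage sketch (not produced by javadoc): reading with a
+      timeout. Host and port are hypothetical; the Socket must have an
+      associated channel, e.g. one obtained from SocketChannel.open().
+
+      import java.net.InetSocketAddress;
+      import java.nio.channels.SocketChannel;
+      import org.apache.hadoop.net.SocketInputStream;
+
+      public class TimedReadExample {
+        public static void main(String[] args) throws Exception {
+          SocketChannel ch = SocketChannel.open(new InetSocketAddress("h1", 8020));
+          // 30 s read timeout; a timeout of 0 would mean wait forever
+          SocketInputStream in = new SocketInputStream(ch.socket(), 30000);
+          byte[] buf = new byte[4096];
+          int n = in.read(buf, 0, buf.length); // SocketTimeoutException if it expires
+          System.out.println("read " + n + " bytes");
+          in.close();
+        }
+      }
+ -->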
+ <!-- start class org.apache.hadoop.net.SocketOutputStream -->
+ <class name="SocketOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.WritableByteChannel"/>
+ <constructor name="SocketOutputStream" type="java.nio.channels.WritableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for writing, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds. must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketOutputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketOutputStream(socket.getChannel(), timeout):<br><br>
+
+ Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketOutputStream#SocketOutputStream(WritableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds; must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.WritableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this stream.
+ This is useful in certain cases like channel for
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for writing.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="transferToFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileCh" type="java.nio.channels.FileChannel"/>
+ <param name="position" type="long"/>
+ <param name="count" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Transfers data from FileChannel using
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.
+
+ Similar to readFully(), this waits till the requested amount of
+ data is transferred.
+
+ @param fileCh FileChannel to transfer data from.
+ @param position position within the channel where the transfer begins
+ @param count number of bytes to transfer.
+
+ @throws EOFException
+ If end of input file is reached before requested number of
+ bytes are transferred.
+
+ @throws SocketTimeoutException
+ If this channel blocks transfer longer than timeout for
+ this stream.
+
+ @throws IOException Includes any exception thrown by
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an output stream that can have a timeout while writing.
+ This sets non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} on the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketInputStream} for reading.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketOutputStream -->
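+ <!-- Editorial usage sketch (not produced by javadoc): sending a region of a
+      file over a socket with a write timeout via transferToFully. File name,
+      host, and port are hypothetical.
+
+      import java.io.FileInputStream;
+      import java.net.InetSocketAddress;
+      import java.nio.channels.FileChannel;
+      import java.nio.channels.SocketChannel;
+      import org.apache.hadoop.net.SocketOutputStream;
+
+      public class TransferExample {
+        public static void main(String[] args) throws Exception {
+          SocketChannel ch = SocketChannel.open(new InetSocketAddress("h1", 8020));
+          SocketOutputStream out = new SocketOutputStream(ch.socket(), 30000);
+          FileChannel fileCh = new FileInputStream("/tmp/block").getChannel();
+          // Blocks until the full megabyte is transferred or the timeout fires
+          out.transferToFully(fileCh, 0L, 1024 * 1024);
+          fileCh.close();
+          out.close();
+        }
+      }
+ -->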
+ <!-- start class org.apache.hadoop.net.SocksSocketFactory -->
+ <class name="SocksSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="SocksSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <constructor name="SocksSocketFactory" type="java.net.Proxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with a supplied Proxy
+
+ @param proxy the proxy to use to create sockets]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create sockets with a SOCKS proxy]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocksSocketFactory -->
+ <!-- start class org.apache.hadoop.net.StandardSocketFactory -->
+ <class name="StandardSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StandardSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create standard (direct) sockets without any proxy]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.StandardSocketFactory -->
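+ <!-- Editorial usage sketch (not produced by javadoc): both factories share
+      the javax.net.SocketFactory API, so callers create sockets the same way
+      whether or not a SOCKS proxy sits in between. Host and port are
+      hypothetical.
+
+      import java.net.Socket;
+      import javax.net.SocketFactory;
+      import org.apache.hadoop.net.StandardSocketFactory;
+
+      public class FactoryExample {
+        public static void main(String[] args) throws Exception {
+          SocketFactory factory = new StandardSocketFactory();
+          Socket s = factory.createSocket("h1.example.com", 8020);
+          System.out.println(s.isConnected());
+          s.close();
+        }
+      }
+ -->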
+</package>
+<package name="org.apache.hadoop.record">
+ <!-- start class org.apache.hadoop.record.BinaryRecordInput -->
+ <class name="BinaryRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="BinaryRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordInput" type="java.io.DataInput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inp" type="java.io.DataInput"/>
+ <doc>
+ <![CDATA[Get a thread-local record input for the supplied DataInput.
+ @param inp data input stream
+ @return binary record input corresponding to the supplied DataInput.]]>
+ </doc>
+ </method>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordInput -->
+ <!-- start class org.apache.hadoop.record.BinaryRecordOutput -->
+ <class name="BinaryRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="BinaryRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordOutput" type="java.io.DataOutput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <doc>
+ <![CDATA[Get a thread-local record output for the supplied DataOutput.
+ @param out data output stream
+ @return binary record output corresponding to the supplied DataOutput.]]>
+ </doc>
+ </method>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordOutput -->
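+ <!-- Editorial usage sketch (not produced by javadoc): a binary write/read
+      round trip with matching tags. The field tags are hypothetical, and the
+      null Record argument reflects the assumption that the binary codec uses
+      start/endRecord only as framing no-ops.
+
+      import java.io.ByteArrayInputStream;
+      import java.io.ByteArrayOutputStream;
+      import java.io.DataInputStream;
+      import java.io.DataOutputStream;
+      import org.apache.hadoop.record.BinaryRecordInput;
+      import org.apache.hadoop.record.BinaryRecordOutput;
+
+      public class BinaryRoundTrip {
+        public static void main(String[] args) throws Exception {
+          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+          BinaryRecordOutput rout = new BinaryRecordOutput(new DataOutputStream(bytes));
+          rout.startRecord(null, "rec");
+          rout.writeInt(42, "id");
+          rout.writeString("hello", "msg");
+          rout.endRecord(null, "rec");
+
+          BinaryRecordInput rin = new BinaryRecordInput(
+              new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
+          rin.startRecord("rec");
+          System.out.println(rin.readInt("id"));     // 42
+          System.out.println(rin.readString("msg")); // hello
+          rin.endRecord("rec");
+        }
+      }
+ -->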
+ <!-- start class org.apache.hadoop.record.Buffer -->
+ <class name="Buffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Buffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-count sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte array as the initial value.
+
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[], int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte range as the initial value.
+
+ @param bytes A copy of this array becomes the backing storage for the object.
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Use the specified byte array as the underlying sequence.
+
+ @param bytes byte sequence]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Copy the specified byte array to the Buffer. Replaces the current buffer.
+
+ @param bytes byte array to be assigned
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the Buffer.
+
+ @return The data; only positions 0 through getCount() - 1 are valid.]]>
+ </doc>
+ </method>
+ <method name="getCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current count of the buffer.]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum count that could be handled without
+ resizing the backing storage.
+
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newCapacity" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved if newCapacity >= getCount().
+ @param newCapacity The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the buffer to size 0]]>
+ </doc>
+ </method>
+ <method name="truncate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Change the capacity of the backing store to be the same as the current
+ count of buffer.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer.
+
+ @param bytes byte array to be appended
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer
+
+ @param bytes byte array to be appended]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Define the sort order of the Buffer.
+
+ @param other The other buffer
+ @return Positive if this is bigger than other, 0 if they are equal, and
+ negative if this is smaller than other.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="charsetName" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ <doc>
+ <![CDATA[Convert the byte buffer to a string in a specific character encoding
+
+ @param charsetName Valid Java Character Set Name]]>
+ </doc>
+ </method>
+ <method name="clone" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="CloneNotSupportedException" type="java.lang.CloneNotSupportedException"/>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is used as a Java native type for buffers.
+ It is resizable and distinguishes between the count of the sequence and
+ the current capacity.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Buffer -->
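+ <!-- Editorial usage sketch (not produced by javadoc): Buffer keeps count
+      and capacity separate, growing on append and shrinking on truncate.
+
+      import org.apache.hadoop.record.Buffer;
+
+      public class BufferExample {
+        public static void main(String[] args) {
+          Buffer buf = new Buffer(new byte[] { 1, 2, 3 });
+          buf.append(new byte[] { 4, 5 });
+          System.out.println(buf.getCount());                      // 5
+          System.out.println(buf.getCapacity() >= buf.getCount()); // true
+          buf.truncate(); // capacity now equals the count
+          System.out.println(buf.getCapacity());                   // 5
+        }
+      }
+ -->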
+ <!-- start class org.apache.hadoop.record.CsvRecordInput -->
+ <class name="CsvRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="CsvRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordInput -->
+ <!-- start class org.apache.hadoop.record.CsvRecordOutput -->
+ <class name="CsvRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="CsvRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordOutput -->
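+ <!-- A minimal usage sketch: writing a few primitive fields through
+ CsvRecordOutput. Only methods listed above are used; the tags and sample
+ values are illustrative, and per the RecordOutput docs the tags only
+ matter to tagged formats such as XML.
+
+ import java.io.ByteArrayOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.record.CsvRecordOutput;
+
+ public class CsvWriteSketch {
+   public static void main(String[] args) throws IOException {
+     ByteArrayOutputStream bos = new ByteArrayOutputStream();
+     CsvRecordOutput out = new CsvRecordOutput(bos);
+     out.writeInt(42, "id");           // tag is unused by the CSV format
+     out.writeString("alice", "name");
+     out.writeBool(true, "active");
+     System.out.println(bos.toString()); // CSV-encoded field values
+   }
+ }
+ -->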
+ <!-- start interface org.apache.hadoop.record.Index -->
+ <interface name="Index" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="done" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="incr"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Interface that acts as an iterator for deserializing vectors and maps.
+ The deserializer returns an instance that the record uses to
+ read vectors and maps. An example of usage is as follows:
+
+ <code>
+ Index idx = startVector(...);
+ while (!idx.done()) {
+ .... // read element of a vector
+ idx.incr();
+ }
+ </code>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.Index -->
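+ <!-- A fuller version of the Index loop above, as a minimal sketch: reading a
+ serialized vector of strings through a CsvRecordInput. The "values" tag and
+ the helper name are illustrative.
+
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import org.apache.hadoop.record.CsvRecordInput;
+ import org.apache.hadoop.record.Index;
+
+ public class IndexSketch {
+   static ArrayList<String> readStrings(CsvRecordInput in) throws IOException {
+     ArrayList<String> values = new ArrayList<String>();
+     Index idx = in.startVector("values"); // returns the element counter
+     while (!idx.done()) {
+       values.add(in.readString("values"));
+       idx.incr();
+     }
+     in.endVector("values");
+     return values;
+   }
+ }
+ -->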
+ <!-- start class org.apache.hadoop.record.Record -->
+ <class name="Record" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Record"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="serialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record with a tag (usually the field name)
+ @param rout Record output destination
+ @param tag record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record with a tag (usually field name)
+ @param rin Record input source
+ @param tag Record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record without a tag
+ @param rout Record output destination]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record without a tag
+ @param rin Record input source]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="din" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Abstract class that is extended by generated classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Record -->
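+ <!-- A minimal sketch of round-tripping a record: MyRecord is hypothetical,
+ standing in for any subclass generated by the record compiler; only the
+ untagged serialize/deserialize methods documented above are used.
+
+ import java.io.ByteArrayInputStream;
+ import java.io.ByteArrayOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.record.CsvRecordInput;
+ import org.apache.hadoop.record.CsvRecordOutput;
+
+ public class RecordRoundTrip {
+   static byte[] toCsv(MyRecord r) throws IOException {
+     ByteArrayOutputStream bos = new ByteArrayOutputStream();
+     r.serialize(new CsvRecordOutput(bos)); // untagged serialize(RecordOutput)
+     return bos.toByteArray();
+   }
+   static void fromCsv(MyRecord r, byte[] bytes) throws IOException {
+     r.deserialize(new CsvRecordInput(new ByteArrayInputStream(bytes)));
+   }
+ }
+ -->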
+ <!-- start class org.apache.hadoop.record.RecordComparator -->
+ <class name="RecordComparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordComparator" type="java.lang.Class"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a raw {@link Record} comparison implementation.]]>
+ </doc>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.record.RecordComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link Record} implementation.
+
+ @param c record class for which a raw comparator is provided
+ @param comparator Raw comparator instance for class c]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A raw record comparator base class]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.RecordComparator -->
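+ <!-- A sketch of registering a raw comparator via define(), assuming the same
+ hypothetical generated class MyRecord. The compare() body simply defers to
+ Utils.compareBytes (documented below); a real optimized comparator would
+ walk the serialized fields instead.
+
+ import org.apache.hadoop.record.RecordComparator;
+ import org.apache.hadoop.record.Utils;
+
+ public class MyRecordComparator extends RecordComparator {
+   public MyRecordComparator() { super(MyRecord.class); }
+   public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
+     return Utils.compareBytes(b1, s1, l1, b2, s2, l2);
+   }
+   static { // register once, so comparator lookups find it
+     RecordComparator.define(MyRecord.class, new MyRecordComparator());
+   }
+ }
+ -->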
+ <!-- start interface org.apache.hadoop.record.RecordInput -->
+ <interface name="RecordInput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a byte from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a boolean from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a long integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a single-precision float from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a double-precision number from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read byte array from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of elements.]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of map entries.]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the deserializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordInput -->
+ <!-- start interface org.apache.hadoop.record.RecordOutput -->
+ <interface name="RecordOutput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a byte to serialized record.
+ @param b Byte to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a boolean to serialized record.
+ @param b Boolean to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write an integer to serialized record.
+ @param i Integer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a long integer to serialized record.
+ @param l Long to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a single-precision float to serialized record.
+ @param f Float to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a double precision floating point number to serialized record.
+ @param d Double to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a Unicode string to serialized record.
+ @param s String to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a buffer to serialized record.
+ @param buf Buffer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a record to be serialized.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized record.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a vector to be serialized.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized vector.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a map to be serialized.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized map.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the serializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordOutput -->
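+ <!-- A hedged sketch of the shape a generated serialize() body takes against
+ this interface: startRecord/endRecord bracket the fields, and each field
+ goes out through one typed write call. The id and name fields are invented
+ for illustration.
+
+ public void serialize(RecordOutput rout, String tag) throws IOException {
+   rout.startRecord(this, tag);
+   rout.writeInt(id, "id");
+   rout.writeString(name, "name");
+   rout.endRecord(this, tag);
+ }
+ -->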
+ <!-- start class org.apache.hadoop.record.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array containing the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+ <![CDATA[Get the encoded length of an integer stored in the variable-length format
+ @return the encoded length]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ long is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following long
+ is positive, and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following long
+ is negative, and the number of bytes that follow is -(v+120). Bytes are
+ stored in the high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an int to a binary stream with zero-compressed encoding.
+
+ @param stream Binary output stream
+ @param i int to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <field name="hexchars" type="char[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Various utility functions for the Hadoop record I/O runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Utils -->
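+ <!-- A worked sketch of the zero-compressed encoding described under
+ writeVLong: 300 is outside [-112, 127], so one marker byte plus two payload
+ bytes are written, matching what getVIntSize reports.
+
+ import java.io.ByteArrayInputStream;
+ import java.io.ByteArrayOutputStream;
+ import java.io.DataInputStream;
+ import java.io.DataOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.record.Utils;
+
+ public class VLongSketch {
+   public static void main(String[] args) throws IOException {
+     ByteArrayOutputStream bos = new ByteArrayOutputStream();
+     Utils.writeVLong(new DataOutputStream(bos), 300L);
+     System.out.println(bos.size());             // 3
+     System.out.println(Utils.getVIntSize(300L)); // 3
+     DataInputStream in = new DataInputStream(
+         new ByteArrayInputStream(bos.toByteArray()));
+     System.out.println(Utils.readVLong(in));     // 300
+   }
+ }
+ -->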
+ <!-- start class org.apache.hadoop.record.XmlRecordInput -->
+ <class name="XmlRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="XmlRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Deserializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordInput -->
+ <!-- start class org.apache.hadoop.record.XmlRecordOutput -->
+ <class name="XmlRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="XmlRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Serializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordOutput -->
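+ <!-- An XML round-trip sketch, again with the hypothetical generated class
+ MyRecord; unlike CSV, the tag argument is meaningful here because XML is a
+ tagged serialization format.
+
+ import java.io.ByteArrayInputStream;
+ import java.io.ByteArrayOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.record.XmlRecordInput;
+ import org.apache.hadoop.record.XmlRecordOutput;
+
+ public class XmlRoundTrip {
+   static void roundTrip(MyRecord src, MyRecord dst) throws IOException {
+     ByteArrayOutputStream bos = new ByteArrayOutputStream();
+     src.serialize(new XmlRecordOutput(bos), "myrecord");
+     dst.deserialize(new XmlRecordInput(
+         new ByteArrayInputStream(bos.toByteArray())), "myrecord");
+   }
+ }
+ -->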
+</package>
+<package name="org.apache.hadoop.record.compiler">
+ <!-- start class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <class name="CodeBuffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A wrapper around StringBuffer that automatically handles indentation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.Consts -->
+ <class name="Consts" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="RIO_PREFIX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_VAR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER_FIELDS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_OUTPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_INPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TAG" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Constant definitions for the Record I/O compiler.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.Consts -->
+ <!-- start class org.apache.hadoop.record.compiler.JBoolean -->
+ <class name="JBoolean" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBoolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBoolean]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBoolean -->
+ <!-- start class org.apache.hadoop.record.compiler.JBuffer -->
+ <class name="JBuffer" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBuffer]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "buffer" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.JByte -->
+ <class name="JByte" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JByte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "byte" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JByte -->
+ <!-- start class org.apache.hadoop.record.compiler.JDouble -->
+ <class name="JDouble" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JDouble"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JDouble]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JDouble -->
+ <!-- start class org.apache.hadoop.record.compiler.JField -->
+ <class name="JField" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JField" type="java.lang.String, T"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JField]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[A thin wrapper around a record field.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JField -->
+ <!-- start class org.apache.hadoop.record.compiler.JFile -->
+ <class name="JFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFile" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JFile&gt;, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFile
+
+ @param name name of the file, possibly a full pathname
+ @param inclFiles included files (as JFile)
+ @param recList List of records defined within this file]]>
+ </doc>
+ </constructor>
+ <method name="genCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <param name="destDir" type="java.lang.String"/>
+ <param name="options" type="java.util.ArrayList&lt;java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate record code in the given language. The language name
+ should be all lowercase.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Container for the Hadoop Record DDL.
+ The main components of the file are filename, list of included files,
+ and records defined in that file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFile -->
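+ <!-- A sketch of driving genCode programmatically, building the DDL in memory
+ instead of parsing a .jr file. The record name, fields, and output settings
+ are illustrative, and passing a dotted module.name to JRecord is an
+ assumption based on the constructor signatures in this package.
+
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import org.apache.hadoop.record.compiler.*;
+
+ public class GenCodeSketch {
+   public static void main(String[] args) throws IOException {
+     // In-memory DDL: record Person { string name; int age; }
+     ArrayList<JField<JType>> fields = new ArrayList<JField<JType>>();
+     fields.add(new JField<JType>("name", new JString()));
+     fields.add(new JField<JType>("age", new JInt()));
+     ArrayList<JRecord> records = new ArrayList<JRecord>();
+     records.add(new JRecord("demo.Person", fields));
+     JFile file = new JFile("person.jr", new ArrayList<JFile>(), records);
+     file.genCode("java", ".", new ArrayList<String>()); // language lowercase
+   }
+ }
+ -->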
+ <!-- start class org.apache.hadoop.record.compiler.JFloat -->
+ <class name="JFloat" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFloat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFloat]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFloat -->
+ <!-- start class org.apache.hadoop.record.compiler.JInt -->
+ <class name="JInt" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JInt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JInt]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "int" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JInt -->
+ <!-- start class org.apache.hadoop.record.compiler.JLong -->
+ <class name="JLong" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JLong"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JLong]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "long" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JLong -->
+ <!-- start class org.apache.hadoop.record.compiler.JMap -->
+ <class name="JMap" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JMap" type="org.apache.hadoop.record.compiler.JType, org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JMap]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JMap -->
+ <!-- start class org.apache.hadoop.record.compiler.JRecord -->
+ <class name="JRecord" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JRecord" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JRecord]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JRecord -->
+ <!-- start class org.apache.hadoop.record.compiler.JString -->
+ <class name="JString" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JString"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JString]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JString -->
+ <!-- start class org.apache.hadoop.record.compiler.JType -->
+ <class name="JType" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Abstract base class for all types supported by Hadoop Record I/O.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JType -->
+ <!-- start class org.apache.hadoop.record.compiler.JVector -->
+ <class name="JVector" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JVector" type="org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JVector]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JVector -->
+</package>
+<package name="org.apache.hadoop.record.compiler.ant">
+ <!-- start class org.apache.hadoop.record.compiler.ant.RccTask -->
+ <class name="RccTask" extends="org.apache.tools.ant.Task"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RccTask"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of RccTask]]>
+ </doc>
+ </constructor>
+ <method name="setLanguage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the output language option
+ @param language "java"/"c++"]]>
+ </doc>
+ </method>
+ <method name="setFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets the record definition file attribute
+ @param file record definition file]]>
+ </doc>
+ </method>
+ <method name="setFailonerror"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="flag" type="boolean"/>
+ <doc>
+ <![CDATA[Given multiple files (via fileset), set the error handling behavior
+ @param flag if true, a build exception is thrown on failure (the default)
+ </doc>
+ </method>
+ <method name="setDestdir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets directory where output files will be generated
+ @param dir output directory]]>
+ </doc>
+ </method>
+ <method name="addFileset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="set" type="org.apache.tools.ant.types.FileSet"/>
+ <doc>
+ <![CDATA[Adds a fileset that can consist of one or more files
+ @param set Set of record definition files]]>
+ </doc>
+ </method>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="BuildException" type="org.apache.tools.ant.BuildException"/>
+ <doc>
+ <![CDATA[Invoke the Hadoop record compiler on each record definition file]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Hadoop record compiler Ant task.
+<p> This task takes the given record definition files and compiles them into
+ java or c++ files. It is then up to the user to compile the generated files.
+
+ <p> The task requires the <code>file</code> or the nested fileset element to be
+ specified. Optional attributes are <code>language</code> (set the output
+ language, default is "java"),
+ <code>destdir</code> (name of the destination directory for generated java/c++
+ code, default is ".") and <code>failonerror</code> (specifies error handling
+ behavior; default is true).
+ <p><h4>Usage</h4>
+ <pre>
+ &lt;recordcc
+ destdir="${basedir}/gensrc"
+ language="java"&gt;
+ &lt;fileset include="**\/*.jr" /&gt;
+ &lt;/recordcc&gt;
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.ant.RccTask -->
+</package>
+<package name="org.apache.hadoop.record.compiler.generated">
+ <!-- start class org.apache.hadoop.record.compiler.generated.ParseException -->
+ <class name="ParseException" extends="java.lang.Exception"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ParseException" type="org.apache.hadoop.record.compiler.generated.Token, int[][], java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constructor is used by the method "generateParseException"
+ in the generated parser. Calling this constructor generates
+ a new object of this type with the fields "currentToken",
+ "expectedTokenSequences", and "tokenImage" set. The boolean
+ flag "specialConstructor" is also set to true to indicate that
+ this constructor was used to create this object.
+ This constructor calls its super class with the empty string
+ to force the "toString" method of parent class "Throwable" to
+ print the error message in the form:
+ ParseException: <result of getMessage>]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The following constructors are for use by you for whatever
+ purpose you can think of. Constructing the exception in this
+ manner makes the exception behave in the normal way - i.e., as
+ documented in the class "Throwable". The fields "errorToken",
+ "expectedTokenSequences", and "tokenImage" do not contain
+ relevant information. The JavaCC generated code does not use
+ these constructors.]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method has the standard behavior when this object has been
+ created using the standard constructors. Otherwise, it uses
+ "currentToken" and "expectedTokenSequences" to generate a parse
+ error message and returns it. If this object has been created
+ due to a parse error, and you do not catch it (it gets thrown
+ from the parser), then this method is called during the printing
+ of the final stack trace, and hence the correct error message
+ gets displayed.]]>
+ </doc>
+ </method>
+ <method name="add_escapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Used to convert raw characters to their escaped versions
+ when the raw versions cannot be used as part of an ASCII
+ string literal.]]>
+ </doc>
+ </method>
+ <field name="specialConstructor" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This variable determines which constructor was used to create
+ this object and thereby affects the semantics of the
+ "getMessage" method (see below).]]>
+ </doc>
+ </field>
+ <field name="currentToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is the last token that has been consumed successfully. If
+ this object has been created due to a parse error, the token
+ following this token will (therefore) be the first error token.]]>
+ </doc>
+ </field>
+ <field name="expectedTokenSequences" type="int[][]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Each entry in this array is an array of integers. Each array
+ of integers represents a sequence of tokens (by their ordinal
+ values) that is expected at this point of the parse.]]>
+ </doc>
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is a reference to the "tokenImage" array of the generated
+ parser within which the parse error occurred. This array is
+ defined in the generated ...Constants interface.]]>
+ </doc>
+ </field>
+ <field name="eol" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The end of line string for this machine.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This exception is thrown when parse errors are encountered.
+ You can explicitly create objects of this exception type by
+ calling the method generateParseException in the generated
+ parser.
+
+ You can modify this class to customize your error reporting
+ mechanisms so long as you retain the public fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.ParseException -->
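+ <!-- Usage sketch (illustrative; the IDL literal is hypothetical): per the
+      class doc above, the generated parser throws ParseException on parse
+      errors, and getMessage() renders currentToken and expectedTokenSequences
+      into a readable message:
+
+      import java.io.StringReader;
+      import org.apache.hadoop.record.compiler.generated.ParseException;
+      import org.apache.hadoop.record.compiler.generated.Rcc;
+
+      public class ParseDemo {
+        public static void main(String[] args) {
+          Rcc parser = new Rcc(new StringReader("module demo { class A { int x; } }"));
+          try {
+            parser.Input();   // parse one record IDL input
+          } catch (ParseException e) {
+            // Message is built from currentToken/expectedTokenSequences.
+            System.err.println(e.getMessage());
+          }
+        }
+      }
+ -->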
+ <!-- start class org.apache.hadoop.record.compiler.generated.Rcc -->
+ <class name="Rcc" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="Rcc" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="usage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="driver" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="Input" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Include" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Module" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ModuleName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="RecordList" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Record" return="org.apache.hadoop.record.compiler.JRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Field" return="org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Type" return="org.apache.hadoop.record.compiler.JType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Map" return="org.apache.hadoop.record.compiler.JMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Vector" return="org.apache.hadoop.record.compiler.JVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tm" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"/>
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <method name="generateParseException" return="org.apache.hadoop.record.compiler.generated.ParseException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="enable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="disable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="token_source" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="token" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jj_nt" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Rcc -->
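+ <!-- Usage sketch (illustrative; the file name is hypothetical): Rcc is the
+      record compiler's entry point. driver() compiles the given record IDL
+      files and returns an exit code, main() wraps it, and usage() prints the
+      command line help:
+
+      import org.apache.hadoop.record.compiler.generated.Rcc;
+
+      public class CompileRecords {
+        public static void main(String[] args) {
+          int rc = Rcc.driver(new String[] { "mytypes.jr" });
+          if (rc != 0) {
+            Rcc.usage();
+          }
+          System.exit(rc);
+        }
+      }
+ -->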
+ <!-- start interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <interface name="RccConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="EOF" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MODULE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCLUDE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BOOLEAN_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SEMICOLON_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CSTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IDENT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinOneLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinMultiLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.compiler.generated.RccConstants -->
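+ <!-- Usage sketch (illustrative): the token kind constants above index into
+      the tokenImage array, so a token's numeric kind can be reported
+      symbolically:
+
+      import org.apache.hadoop.record.compiler.generated.RccConstants;
+      import org.apache.hadoop.record.compiler.generated.Token;
+
+      public class TokenKinds {
+        static String describe(Token t) {
+          // tokenImage[k] holds the printable image for token kind k.
+          return t.kind + " = " + RccConstants.tokenImage[t.kind];
+        }
+      }
+ -->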
+ <!-- start class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
+ <class name="RccTokenManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setDebugStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ds" type="java.io.PrintStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="SwitchTo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="jjFillToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="debugStream" type="java.io.PrintStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjstrLiteralImages" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="lexStateNames" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjnewLexState" type="int[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="input_stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="curChar" type="char"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
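+ <!-- Usage sketch (illustrative; the input literal is hypothetical): the
+      token manager can be driven standalone by wrapping a Reader in a
+      SimpleCharStream and pulling tokens until the EOF kind:
+
+      import java.io.StringReader;
+      import org.apache.hadoop.record.compiler.generated.RccConstants;
+      import org.apache.hadoop.record.compiler.generated.RccTokenManager;
+      import org.apache.hadoop.record.compiler.generated.SimpleCharStream;
+      import org.apache.hadoop.record.compiler.generated.Token;
+
+      public class Lex {
+        public static void main(String[] args) {
+          SimpleCharStream cs = new SimpleCharStream(new StringReader("module demo { }"));
+          RccTokenManager tm = new RccTokenManager(cs);
+          for (Token t = tm.getNextToken(); t.kind != RccConstants.EOF; t = tm.getNextToken()) {
+            System.out.println(t.image);
+          }
+        }
+      }
+ -->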
+ <!-- start class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
+ <class name="SimpleCharStream" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setTabSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="getTabSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="ExpandBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="wrapAround" type="boolean"/>
+ </method>
+ <method name="FillBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="BeginToken" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="UpdateLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="c" type="char"/>
+ </method>
+ <method name="readChar" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getEndColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getEndLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="backup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="amount" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="GetImage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="GetSuffix" return="char[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="int"/>
+ </method>
+ <method name="Done"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="adjustBeginLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newLine" type="int"/>
+ <param name="newCol" type="int"/>
+ <doc>
+ <![CDATA[Method to adjust line and column numbers for the start of a token.]]>
+ </doc>
+ </method>
+ <field name="staticFlag" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufpos" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufline" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufcolumn" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="column" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="line" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsCR" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsLF" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputStream" type="java.io.Reader"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="buffer" type="char[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="maxNextCharInd" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inBuf" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="tabSize" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of interface CharStream, where the stream is assumed to
+ contain only ASCII characters (without unicode processing).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.Token -->
+ <class name="Token" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Token"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the image.]]>
+ </doc>
+ </method>
+ <method name="newToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="ofKind" type="int"/>
+ <doc>
+ <![CDATA[Returns a new Token object, by default. However, if you want, you
+ can create and return subclass objects based on the value of ofKind.
+ Simply add the cases to the switch for all those special cases.
+ For example, if you have a subclass of Token called IDToken that
+ you want to create if ofKind is ID, simply add something like:
+
+ case MyParserConstants.ID : return new IDToken();
+
+ to the following switch statement. Then you can cast the matchedToken
+ variable to the appropriate type and use it in your lexical actions.]]>
+ </doc>
+ </method>
+ <field name="kind" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[An integer that describes the kind of this token. This numbering
+ system is determined by JavaCCParser, and a table of these numbers is
+ stored in the file ...Constants.java.]]>
+ </doc>
+ </field>
+ <field name="beginLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="beginColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="image" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The string image of the token.]]>
+ </doc>
+ </field>
+ <field name="next" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A reference to the next regular (non-special) token from the input
+ stream. If this is the last token from the input stream, or if the
+ token manager has not read tokens beyond this one, this field is
+ set to null. This is true only if this token is also a regular
+ token. Otherwise, see below for a description of the contents of
+ this field.]]>
+ </doc>
+ </field>
+ <field name="specialToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This field is used to access special tokens that occur prior to this
+ token, but after the immediately preceding regular (non-special) token.
+ If there are no such special tokens, this field is set to null.
+ When there is more than one such special token, this field refers
+ to the last of these special tokens, which in turn refers to the next
+ previous special token through its specialToken field, and so on
+ until the first special token (whose specialToken field is null).
+ The next fields of special tokens refer to the special tokens that
+ immediately follow them (without an intervening regular token). If there
+ is no such token, this field is null.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Describes the input token stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Token -->
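+ <!-- Sketch (illustrative; IDToken, semanticValue and MyParserConstants.ID
+      are hypothetical names mirroring the newToken() doc above): a Token
+      subclass can carry extra per-token state, returned from the switch
+      inside newToken():
+
+      import org.apache.hadoop.record.compiler.generated.Token;
+
+      public class IDToken extends Token {
+        // Extra state filled in by lexical actions after a cast from Token.
+        public Object semanticValue;
+      }
+
+      // Inside the generated Token.newToken(int ofKind), per the doc above,
+      // one would add:  case MyParserConstants.ID: return new IDToken();
+ -->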
+ <!-- start class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
+ <class name="TokenMgrError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TokenMgrError"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="boolean, int, int, int, java.lang.String, char, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addEscapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Replaces unprintable characters by their escaped (or unicode escaped)
+ equivalents in the given string.]]>
+ </doc>
+ </method>
+ <method name="LexicalError" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="EOFSeen" type="boolean"/>
+ <param name="lexState" type="int"/>
+ <param name="errorLine" type="int"/>
+ <param name="errorColumn" type="int"/>
+ <param name="errorAfter" type="java.lang.String"/>
+ <param name="curChar" type="char"/>
+ <doc>
+ <![CDATA[Returns a detailed message for the Error when it is thrown by the
+ token manager to indicate a lexical error.
+ Parameters:
+ EOFSeen : indicates if EOF caused the lexical error
+ lexState : lexical state in which this error occurred
+ errorLine : line number where the error occurred
+ errorColumn : column number where the error occurred
+ errorAfter : prefix that was seen before this error occurred
+ curChar : the offending character
+ Note: You can customize the lexical error message by modifying this method.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[You can also modify the body of this method to customize your error messages.
+ For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are of no
+ concern to end users, so you can return something like:
+
+ "Internal Error : Please file a bug report .... "
+
+ from this method for such cases in the release version of your parser.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
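+ <!-- Usage sketch (illustrative; the input literal is hypothetical): unlike
+      ParseException, TokenMgrError extends java.lang.Error and is unchecked,
+      so lexical failures must be caught explicitly alongside parse failures:
+
+      import java.io.StringReader;
+      import org.apache.hadoop.record.compiler.generated.ParseException;
+      import org.apache.hadoop.record.compiler.generated.Rcc;
+      import org.apache.hadoop.record.compiler.generated.TokenMgrError;
+
+      public class SafeParse {
+        public static void main(String[] args) {
+          Rcc parser = new Rcc(new StringReader("module demo { class A { } }"));
+          try {
+            parser.Input();
+          } catch (ParseException pe) {      // grammar-level error
+            System.err.println(pe.getMessage());
+          } catch (TokenMgrError tme) {      // lexical-level error
+            System.err.println(tme.getMessage());
+          }
+        }
+      }
+ -->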
+</package>
+<package name="org.apache.hadoop.record.meta">
+ <!-- start class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <class name="FieldTypeInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the field's TypeID object.]]>
+ </doc>
+ </method>
+ <method name="getFieldID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the field's ID (name).]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two FieldTypeInfos are equal if each of their fields matches.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ti" type="org.apache.hadoop.record.meta.FieldTypeInfo"/>
+ </method>
+ <doc>
+ <![CDATA[Represents the type information for a field, which is made up of its
+ ID (name) and its type (a TypeID object).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <!-- start class org.apache.hadoop.record.meta.MapTypeID -->
+ <class name="MapTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapTypeID" type="org.apache.hadoop.record.meta.TypeID, org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getKeyTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the TypeID of the map's key element.]]>
+ </doc>
+ </method>
+ <method name="getValueTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the TypeID of the map's value element.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two map typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a Map]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.MapTypeID -->
+ <!-- start class org.apache.hadoop.record.meta.RecordTypeInfo -->
+ <class name="RecordTypeInfo" extends="org.apache.hadoop.record.Record"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty RecordTypeInfo object.]]>
+ </doc>
+ </constructor>
+ <constructor name="RecordTypeInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a RecordTypeInfo object representing a record with the given name
+ @param name Name of the record]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the name of the record.]]>
+ </doc>
+ </method>
+ <method name="setName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the name of the record.]]>
+ </doc>
+ </method>
+ <method name="addField"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fieldName" type="java.lang.String"/>
+ <param name="tid" type="org.apache.hadoop.record.meta.TypeID"/>
+ <doc>
+ <![CDATA[Add a field.
+ @param fieldName Name of the field
+ @param tid Type ID of the field]]>
+ </doc>
+ </method>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a collection of field type infos.]]>
+ </doc>
+ </method>
+ <method name="getNestedStructTypeInfo" return="org.apache.hadoop.record.meta.RecordTypeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the type info of a nested record. We only consider nesting
+ to one level.
+ @param name Name of the nested record]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer_" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ <doc>
+ <![CDATA[This class doesn't implement Comparable as it's not meant to be used
+ for anything besides de/serializing, so comparison is not really implemented:
+ a ClassCastException is thrown if the argument is not a RecordTypeInfo,
+ and 0 is always returned if another RecordTypeInfo is passed in.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A record's Type Information object which can read/write itself.
+
+ Type information for a record comprises metadata about the record,
+ as well as a collection of type information for each field in the record.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.RecordTypeInfo -->
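+ <!-- Usage sketch (illustrative; the record and field names are hypothetical,
+      and BinaryRecordOutput from org.apache.hadoop.record is assumed as the
+      RecordOutput implementation): building the type information for a record
+      and serializing it, per the addField()/serialize() docs above:
+
+      import java.io.ByteArrayOutputStream;
+      import org.apache.hadoop.record.BinaryRecordOutput;
+      import org.apache.hadoop.record.meta.RecordTypeInfo;
+      import org.apache.hadoop.record.meta.TypeID;
+
+      public class TypeInfoDemo {
+        public static void main(String[] args) throws Exception {
+          RecordTypeInfo rti = new RecordTypeInfo("Employee");
+          rti.addField("name", TypeID.StringTypeID);
+          rti.addField("id", TypeID.IntTypeID);
+
+          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+          rti.serialize(new BinaryRecordOutput(bytes), "Employee");
+        }
+      }
+ -->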
+ <!-- start class org.apache.hadoop.record.meta.StructTypeID -->
+ <class name="StructTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StructTypeID" type="org.apache.hadoop.record.meta.RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a StructTypeID based on the RecordTypeInfo of some record]]>
+ </doc>
+ </constructor>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a struct]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.StructTypeID -->
+ <!-- start class org.apache.hadoop.record.meta.TypeID -->
+ <class name="TypeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeVal" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type value. One of the constants in RIOType.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two base typeIDs are equal if they refer to the same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <field name="BoolTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constant instances for the basic types, so we can share them.]]>
+ </doc>
+ </field>
+ <field name="BufferTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ByteTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DoubleTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FloatTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IntTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LongTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="StringTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="typeVal" type="byte"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Represents typeID for basic types.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID -->
+ <!-- start class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <class name="TypeID.RIOType" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TypeID.RIOType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <field name="BOOL" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRUCT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Constants representing the IDL types we support.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <!-- start class org.apache.hadoop.record.meta.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <param name="typeID" type="org.apache.hadoop.record.meta.TypeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read/skip bytes from the stream based on a type.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Various utility functions for the Hadoop record I/O platform.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.Utils -->
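+ <!-- Usage sketch (illustrative): skip() is what lets a deserializer tolerate
+      fields it does not know about: given the TypeID recovered from serialized
+      type information, it reads and discards exactly one value of that type:
+
+      import java.io.IOException;
+      import org.apache.hadoop.record.RecordInput;
+      import org.apache.hadoop.record.meta.TypeID;
+      import org.apache.hadoop.record.meta.Utils;
+
+      public class SkipUnknown {
+        static void skipField(RecordInput rin, String tag, TypeID tid)
+            throws IOException {
+          Utils.skip(rin, tag, tid);   // consume the unknown field's bytes
+        }
+      }
+ -->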
+ <!-- start class org.apache.hadoop.record.meta.VectorTypeID -->
+ <class name="VectorTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VectorTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getElementTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two vector typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for vector.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.VectorTypeID -->
+</package>
+<package name="org.apache.hadoop.security">
+ <!-- start class org.apache.hadoop.security.UnixUserGroupInformation -->
+ <class name="UnixUserGroupInformation" extends="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnixUserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameters user name and its group names.
+ The first entry in the groups list is the default group.
+
+ @param userName a user's name
+ @param groupNames groups list, first of which is the default group
+ @exception IllegalArgumentException if any argument is null]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameter user/group names
+
+ @param ugi an array containing user/group names, the first
+ element of which is the user name, the second of
+ which is the default group name.
+ @exception IllegalArgumentException if the array size is less than 2
+ or any element is null.]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Create an immutable {@link UnixUserGroupInformation} object.]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an array of group names.]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the user's name.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize this object.
+ First check if this is a UGI in the string format.
+ If not, throw an IOException; otherwise
+ set this object's fields by reading them from the given data input.
+
+ @param in input stream
+ @exception IOException is thrown if any error is encountered when reading]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize this object.
+ First write a string marking that this is a UGI in the string format,
+ then write this object's serialized form to the given data output.
+
+ @param out output stream
+ @exception IOException if any error is encountered during writing]]>
+ </doc>
+ </method>
+ <method name="saveToConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <param name="ugi" type="org.apache.hadoop.security.UnixUserGroupInformation"/>
+ <doc>
+      <![CDATA[Store the given <code>ugi</code> as a comma separated string in
+ <code>conf</code> under the property <code>attr</code>.
+
+ The string starts with the user name, followed by the default group name
+ and then the other group names.
+
+ @param conf configuration
+ @param attr property name
+ @param ugi a UnixUserGroupInformation]]>
+ </doc>
+ </method>
+ <method name="readFromConf" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+      <![CDATA[Read a UGI from the given <code>conf</code>.
+
+ The object is expected to be stored under the property name <code>attr</code>
+ as a comma separated string that starts
+ with the user name followed by group names.
+ If the property is not defined, return null.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the UGI in the map.
+ Otherwise, construct a UGI from the configuration, store it in the
+ ugi map and return it.
+
+ @param conf configuration
+ @param attr property name
+ @return a UnixUserGroupInformation
+ @throws LoginException if the stored string is ill-formatted.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Get current user's name and the names of all its groups from Unix.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the ugi in the map.
+ Otherwise get the current user's information from Unix, store it
+ in the map, and return it.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Equivalent to login(conf, false).]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="save" type="boolean"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+      <![CDATA[Get a user's name & its group names from the given configuration;
+ if they are not defined in the configuration, get the current user's
+ information from Unix.
+ If the user already has a UGI in the ugi map, return the one in
+ the UGI map.
+
+ @param conf either a job configuration or a client's configuration
+ @param save whether to save the information to <code>conf</code>
+ @return UnixUserGroupInformation the user/group information
+ @exception LoginException if unable to get the user/group information]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Decide if two UGIs are the same
+
+ @param other other object
+ @return true if they are the same; false otherwise.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code for this UGI.
+ The hash code for a UGI is the hash code of its user name string.
+
+ @return a hash code value for this UGI.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this object to a string
+
+ @return a comma separated string containing the user name and group names]]>
+ </doc>
+ </method>
+ <field name="UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of UserGroupInformation in the Unix system]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UnixUserGroupInformation -->
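+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): the login/saveToConf/readFromConf
+       round trip described above. The property name "my.ugi" and the class
+       name UgiRoundTrip are hypothetical.
+
+       import javax.security.auth.login.LoginException;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.security.UnixUserGroupInformation;
+
+       public class UgiRoundTrip {
+         public static void main(String[] args) throws LoginException {
+           Configuration conf = new Configuration();
+           // Resolve the current Unix user (cached in the ugi map) and save it.
+           UnixUserGroupInformation ugi = UnixUserGroupInformation.login(conf, true);
+           System.out.println("user = " + ugi.getUserName());
+           UnixUserGroupInformation.saveToConf(conf, "my.ugi", ugi);
+           // Read it back from the configuration property.
+           UnixUserGroupInformation copy =
+               UnixUserGroupInformation.readFromConf(conf, "my.ugi");
+           System.out.println("round trip ok: " + ugi.equals(copy));
+         }
+       }
+  -->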
+ <!-- start class org.apache.hadoop.security.UserGroupInformation -->
+ <class name="UserGroupInformation" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="UserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCurrentUGI" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="setCurrentUGI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <doc>
+ <![CDATA[Set the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get username
+
+ @return the user's name]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the names of the groups that the user belongs to
+
+ @return an array of group names]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Login and return a UserGroupInformation object.]]>
+ </doc>
+ </method>
+ <method name="readFrom" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link UserGroupInformation} from conf]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Writable} abstract class for storing user and groups information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UserGroupInformation -->
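+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): binding a logged-in UGI to the
+       current thread via setCurrentUGI/getCurrentUGI, as described above.
+       The class name CurrentUgi is hypothetical.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.security.UserGroupInformation;
+
+       public class CurrentUgi {
+         public static void main(String[] args) throws Exception {
+           UserGroupInformation ugi = UserGroupInformation.login(new Configuration());
+           UserGroupInformation.setCurrentUGI(ugi);  // bind to this thread
+           System.out.println(UserGroupInformation.getCurrentUGI().getUserName());
+         }
+       }
+  -->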
+</package>
+<package name="org.apache.hadoop.tools">
+ <!-- start class org.apache.hadoop.tools.DistCp -->
+ <class name="DistCp" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="DistCp" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="destPath" type="java.lang.String"/>
+ <param name="logPath" type="org.apache.hadoop.fs.Path"/>
+ <param name="srcAsList" type="boolean"/>
+ <param name="ignoreReadFailures" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This is the main driver for recursively copying directories
+ across file systems. It takes at least two command-line parameters: a source
+ URL and a destination URL. It then essentially does an "ls -lR" on the
+ source URL, and writes the output in a round-robin manner to all the map
+ input files. The mapper actually copies the files allotted to it. The
+ reduce is empty.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getRandomId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A Map-reduce program to recursively copy directories between
+ different file-systems.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp -->
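+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): driving DistCp programmatically
+       through ToolRunner, mirroring the command-line usage described in run()
+       above. The source and destination URLs and the class name CopyJob are
+       hypothetical.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.tools.DistCp;
+       import org.apache.hadoop.util.ToolRunner;
+
+       public class CopyJob {
+         public static void main(String[] args) throws Exception {
+           Configuration conf = new Configuration();
+           int exitCode = ToolRunner.run(conf, new DistCp(conf),
+               new String[] { "hdfs://nn1:8020/src", "hdfs://nn2:8020/dest" });
+           System.exit(exitCode);
+         }
+       }
+  -->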
+ <!-- start class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <class name="DistCp.DuplicationException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="ERROR_CODE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Error code for this exception]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An exception class for duplicated source files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <!-- start class org.apache.hadoop.tools.HadoopArchives -->
+ <class name="HadoopArchives" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="HadoopArchives" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="archive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPaths" type="java.util.List&lt;org.apache.hadoop.fs.Path&gt;"/>
+ <param name="archiveName" type="java.lang.String"/>
+ <param name="dest" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Archive the given source paths into
+ the destination.
+ @param srcPaths the source paths to be archived
+ @param archiveName the name of the archive to create
+ @param dest the destination directory that will contain the archive]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+      <![CDATA[The main driver for creating the archives.
+ It takes at least two command line parameters: the src and the
+ dest. It does an lsr on the source paths.
+ The mapper creates the archives and the reducer creates
+ the archive index.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+      <![CDATA[The main entry point.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[An archive creation utility.
+ This class provides methods that can be used
+ to create hadoop archives. For an understanding of
+ Hadoop archives, see {@link HarFileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.HadoopArchives -->
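+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): creating an archive with the
+       archive(srcPaths, archiveName, dest) method above. The paths, the
+       archive name and the class name MakeArchive are hypothetical.
+
+       import java.util.Arrays;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.tools.HadoopArchives;
+
+       public class MakeArchive {
+         public static void main(String[] args) throws Exception {
+           HadoopArchives har = new HadoopArchives(new Configuration());
+           har.archive(
+               Arrays.asList(new Path("/logs/part1"), new Path("/logs/part2")),
+               "logs.har", new Path("/archives"));
+         }
+       }
+  -->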
+ <!-- start class org.apache.hadoop.tools.Logalyzer -->
+ <class name="Logalyzer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Logalyzer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doArchive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logListURI" type="java.lang.String"/>
+ <param name="archiveDirectory" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doArchive: Workhorse function to archive log-files.
+ @param logListURI : The URI which will serve the list of log-files to archive.
+ @param archiveDirectory : The directory in which to store the archived log-files.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="doAnalyze"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFilesDirectory" type="java.lang.String"/>
+ <param name="outputDirectory" type="java.lang.String"/>
+ <param name="grepPattern" type="java.lang.String"/>
+ <param name="sortColumns" type="java.lang.String"/>
+ <param name="columnSeparator" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doAnalyze:
+ @param inputFilesDirectory : Directory containing the files to be analyzed.
+ @param outputDirectory : Directory to store analysis (output).
+ @param grepPattern : Pattern to *grep* for.
+ @param sortColumns : Sort specification for output.
+ @param columnSeparator : Column separator.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+      <![CDATA[Logalyzer: A utility tool for archiving and analyzing hadoop logs.
+ <p>
+ This tool supports archiving and analyzing (sort/grep) of log-files.
+ It takes as input
+ a) an input URI which will serve the URIs of the logs to be archived,
+ b) an output directory (not mandatory),
+ c) a directory on dfs to archive the logs, and
+ d) the sort/grep patterns for analyzing the files and the separator for column boundaries.
+ Usage:
+ Logalyzer -archive -archiveDir <directory to archive logs> -analysis <directory> -logs <log-list uri> -grep <pattern> -sort <col1, col2> -separator <separator>
+ <p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <class name="Logalyzer.LogComparator" extends="org.apache.hadoop.io.Text.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Logalyzer.LogComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys of the logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+ <class name="Logalyzer.LogRegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="Logalyzer.LogRegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+</package>
+<package name="org.apache.hadoop.util">
+ <!-- start class org.apache.hadoop.util.CyclicIteration -->
+ <class name="CyclicIteration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable&lt;java.util.Map.Entry&lt;K, V&gt;&gt;"/>
+ <constructor name="CyclicIteration" type="java.util.SortedMap&lt;K, V&gt;, K"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an {@link Iterable} object,
+ so that an {@link Iterator} can be created
+ for iterating the given {@link SortedMap}.
+ The iteration begins from the starting key exclusively.]]>
+ </doc>
+ </constructor>
+ <method name="iterator" return="java.util.Iterator&lt;java.util.Map.Entry&lt;K, V&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Provide a cyclic {@link Iterator} for a {@link SortedMap}.
+ The {@link Iterator} navigates the entries of the map
+ according to the map's ordering.
+ If the {@link Iterator} hits the last entry of the map,
+ it will then continue from the first entry.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.CyclicIteration -->
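+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): iterating a SortedMap cyclically
+       from a starting key, per the class doc above. The class name CyclicDemo
+       is hypothetical.
+
+       import java.util.Map;
+       import java.util.TreeMap;
+       import org.apache.hadoop.util.CyclicIteration;
+
+       public class CyclicDemo {
+         public static void main(String[] args) {
+           TreeMap<Integer, String> map = new TreeMap<Integer, String>();
+           map.put(1, "a");
+           map.put(2, "b");
+           map.put(3, "c");
+           // The starting key 2 is excluded; expected order: 3, 1, 2.
+           for (Map.Entry<Integer, String> e : new CyclicIteration<Integer, String>(map, 2)) {
+             System.out.println(e.getKey() + " = " + e.getValue());
+           }
+         }
+       }
+  -->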
+ <!-- start class org.apache.hadoop.util.Daemon -->
+ <class name="Daemon" extends="java.lang.Thread"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Daemon"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.ThreadGroup, java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread to be part of a specified thread group.]]>
+ </doc>
+ </constructor>
+ <method name="getRunnable" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+      <![CDATA[A thread that has called {@link Thread#setDaemon(boolean)} with true.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Daemon -->
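+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): wrapping a Runnable in a Daemon
+       so the JVM need not wait for it on exit. The class name DaemonDemo is
+       hypothetical.
+
+       import org.apache.hadoop.util.Daemon;
+
+       public class DaemonDemo {
+         public static void main(String[] args) {
+           Daemon d = new Daemon(new Runnable() {
+             public void run() {
+               System.out.println("background work");
+             }
+           });
+           d.start();  // a daemon thread: it will not keep the JVM alive
+         }
+       }
+  -->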
+ <!-- start class org.apache.hadoop.util.DiskChecker -->
+ <class name="DiskChecker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mkdirsWithExistsCheck" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+      <![CDATA[The semantics of the mkdirsWithExistsCheck method differ from those of the
+ mkdirs method provided in Sun's java.io.File class in the following way:
+ while creating the non-existent parent directories, this method checks for
+ the existence of those directories if the mkdir fails at any point (since
+ that directory might have just been created by some other process).
+ If both the mkdir() and the exists() check fail for any seemingly
+ non-existent directory, then we signal an error; Sun's mkdir would signal
+ an error (return false) if a directory it is attempting to create already
+ exists or the mkdir fails.
+ @param dir the directory to create
+ @return true on success, false on failure]]>
+ </doc>
+ </method>
+ <method name="checkDir"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ </method>
+ <doc>
+      <![CDATA[Class that provides utility functions for checking disk problems]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker -->
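+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): creating and checking a local
+       directory with DiskChecker. The path and the class name DirCheck are
+       hypothetical.
+
+       import java.io.File;
+       import org.apache.hadoop.util.DiskChecker;
+       import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+
+       public class DirCheck {
+         public static void main(String[] args) {
+           File dir = new File("/tmp/scratch");
+           if (!DiskChecker.mkdirsWithExistsCheck(dir)) {
+             System.err.println("could not create " + dir);
+             return;
+           }
+           try {
+             DiskChecker.checkDir(dir);  // throws if the directory is unusable
+             System.out.println(dir + " looks healthy");
+           } catch (DiskErrorException e) {
+             System.err.println("disk problem: " + e.getMessage());
+           }
+         }
+       }
+  -->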
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <class name="DiskChecker.DiskErrorException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskErrorException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <class name="DiskChecker.DiskOutOfSpaceException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskOutOfSpaceException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <!-- start class org.apache.hadoop.util.GenericOptionsParser -->
+ <class name="GenericOptionsParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Create a <code>GenericOptionsParser</code> to parse only the generic Hadoop
+ arguments.
+
+ The array of string arguments other than the generic arguments can be
+ obtained by {@link #getRemainingArgs()}.
+
+ @param conf the <code>Configuration</code> to modify.
+ @param args command-line arguments.]]>
+ </doc>
+ </constructor>
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, org.apache.commons.cli.Options, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a <code>GenericOptionsParser</code> to parse given options as well
+ as generic Hadoop options.
+
+ The resulting <code>CommandLine</code> object can be obtained by
+ {@link #getCommandLine()}.
+
+ @param conf the configuration to modify
+ @param options options built by the caller
+ @param args User-specified arguments]]>
+ </doc>
+ </constructor>
+ <method name="getRemainingArgs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array of Strings containing only application-specific arguments.
+
+ @return array of <code>String</code>s containing the un-parsed arguments
+ or an <strong>empty array</strong> if commandLine was not defined.]]>
+ </doc>
+ </method>
+ <method name="getCommandLine" return="org.apache.commons.cli.CommandLine"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the commons-cli <code>CommandLine</code> object
+ to process the parsed arguments.
+
+ Note: If the object is created with
+ {@link #GenericOptionsParser(Configuration, String[])}, then the returned
+ object will only contain the parsed generic options.
+
+ @return <code>CommandLine</code> representing list of arguments
+ parsed against Options descriptor.]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Print the usage message for generic command-line options supported.
+
+ @param out stream to print the usage message to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>GenericOptionsParser</code> is a utility to parse command line
+ arguments generic to the Hadoop framework.
+
+ <code>GenericOptionsParser</code> recognizes several standard command
+ line arguments, enabling applications to easily specify a namenode, a
+ jobtracker, additional configuration resources etc.
+
+ <h4 id="GenericOptions">Generic Options</h4>
+
+ <p>The supported generic options are:</p>
+ <p><blockquote><pre>
+ -conf &lt;configuration file&gt; specify a configuration file
+ -D &lt;property=value&gt; use value for given property
+ -fs &lt;local|namenode:port&gt; specify a namenode
+ -jt &lt;local|jobtracker:port&gt; specify a job tracker
+ -files &lt;comma separated list of files&gt; specify comma separated
+ files to be copied to the map reduce cluster
+ -libjars &lt;comma separated list of jars&gt; specify comma separated
+ jar files to include in the classpath.
+ -archives &lt;comma separated list of archives&gt; specify comma
+ separated archives to be unarchived on the compute machines.
+
+ </pre></blockquote></p>
+
+ <p>The general command line syntax is:</p>
+ <p><tt><pre>
+ bin/hadoop command [genericOptions] [commandOptions]
+ </pre></tt></p>
+
+ <p>Generic command line arguments <strong>might</strong> modify
+ <code>Configuration</code> objects given to constructors.</p>
+
+ <p>The functionality is implemented using Commons CLI.</p>
+
+ <p>Examples:</p>
+ <p><blockquote><pre>
+ $ bin/hadoop dfs -fs darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -conf hadoop-site.xml -ls /data
+ list /data directory in dfs with conf specified in hadoop-site.xml
+
+ $ bin/hadoop job -D mapred.job.tracker=darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt local -submit job.xml
+ submit a job to local runner
+
+ $ bin/hadoop jar -libjars testlib.jar
+ -archives test.tgz -files file.txt inputjar args
+ job submission with libjars, files and archives
+ </pre></blockquote></p>
+
+ @see Tool
+ @see ToolRunner]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericOptionsParser -->
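+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): separating generic Hadoop options
+       from application arguments, as described above. The class name ParseDemo
+       is hypothetical; it might be invoked as
+       java ParseDemo -fs darwin:8020 inputDir outputDir
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.util.GenericOptionsParser;
+
+       public class ParseDemo {
+         public static void main(String[] args) {
+           Configuration conf = new Configuration();
+           // Generic options such as -fs and -D are applied to conf;
+           // the remainder is returned for the application to handle.
+           GenericOptionsParser parser = new GenericOptionsParser(conf, args);
+           for (String a : parser.getRemainingArgs()) {
+             System.out.println("app arg: " + a);
+           }
+           System.out.println("fs.default.name = " + conf.get("fs.default.name"));
+         }
+       }
+  -->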
+ <!-- start class org.apache.hadoop.util.GenericsUtil -->
+ <class name="GenericsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericsUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClass" return="java.lang.Class&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <doc>
+ <![CDATA[Returns the Class object (of type <code>Class&lt;T&gt;</code>) of the
+ argument of type <code>T</code>.
+ @param <T> The type of the argument
+ @param t the object whose class is to be obtained
+ @return <code>Class&lt;T&gt;</code>]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+      <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param c the Class object of the items in the list
+ @param list the list to convert]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+      <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param list the list to convert
+ @throws ArrayIndexOutOfBoundsException if the list is empty.
+ Use {@link #toArray(Class, List)} if the list may be empty.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Contains utility methods for dealing with Java Generics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericsUtil -->
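+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): converting a List<T> to T[]. The
+       two-argument overload works on empty lists, which is why the docs above
+       recommend it. The class name ToArrayDemo is hypothetical.
+
+       import java.util.ArrayList;
+       import java.util.List;
+       import org.apache.hadoop.util.GenericsUtil;
+
+       public class ToArrayDemo {
+         public static void main(String[] args) {
+           List<String> names = new ArrayList<String>();
+           names.add("alpha");
+           names.add("beta");
+           String[] a = GenericsUtil.toArray(String.class, names);
+           System.out.println(a.length + " items, first = " + a[0]);
+         }
+       }
+  -->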
+ <!-- start class org.apache.hadoop.util.HeapSort -->
+ <class name="HeapSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="HeapSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using heap sort.
+ {@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of HeapSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.HeapSort -->
+ <!-- start class org.apache.hadoop.util.HostsFileReader -->
+ <class name="HostsFileReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HostsFileReader" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="refresh"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExcludedHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.util.HostsFileReader -->
+ <!-- start interface org.apache.hadoop.util.IndexedSortable -->
+ <interface name="IndexedSortable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Compare items at the given addresses consistent with the semantics of
+ {@link java.util.Comparator#compare(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="swap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Swap items at the given addresses.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for collections capable of being sorted by {@link IndexedSorter}
+ algorithms.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSortable -->
+ <!-- start interface org.apache.hadoop.util.IndexedSorter -->
+ <interface name="IndexedSorter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the items accessed through the given IndexedSortable over the given
+ range of logical indices. From the perspective of the sort algorithm,
+ each index between l (inclusive) and r (exclusive) is an addressable
+ entry.
+ @see IndexedSortable#compare
+ @see IndexedSortable#swap]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Same as {@link #sort(IndexedSortable,int,int)}, but indicate progress
+ periodically.
+ @see #sort(IndexedSortable,int,int)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for sort algorithms accepting {@link IndexedSortable} items.
+
+ A sort algorithm implementing this interface may only
+ {@link IndexedSortable#compare} and {@link IndexedSortable#swap} items
+ for a range of indices to effect a sort across that range.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSorter -->
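+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): adapting a plain int[] to
+       IndexedSortable and sorting it with HeapSort, one of the IndexedSorter
+       implementations in this package. The class name SortDemo is hypothetical.
+
+       import org.apache.hadoop.util.HeapSort;
+       import org.apache.hadoop.util.IndexedSortable;
+
+       public class SortDemo {
+         static class IntArraySortable implements IndexedSortable {
+           private final int[] a;
+           IntArraySortable(int[] a) { this.a = a; }
+           public int compare(int i, int j) {
+             return a[i] < a[j] ? -1 : (a[i] == a[j] ? 0 : 1);
+           }
+           public void swap(int i, int j) {
+             int t = a[i]; a[i] = a[j]; a[j] = t;
+           }
+         }
+
+         public static void main(String[] args) {
+           int[] data = { 5, 3, 8, 1, 9, 2 };
+           // Sort the logical indices [0, data.length).
+           new HeapSort().sort(new IntArraySortable(data), 0, data.length);
+           for (int v : data) {
+             System.out.print(v + " ");
+           }
+           System.out.println();
+         }
+       }
+  -->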
+ <!-- start class org.apache.hadoop.util.MergeSort -->
+ <class name="MergeSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MergeSort" type="java.util.Comparator&lt;org.apache.hadoop.io.IntWritable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mergeSort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="int[]"/>
+ <param name="dest" type="int[]"/>
+ <param name="low" type="int"/>
+ <param name="high" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of MergeSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.MergeSort -->
+ <!-- start class org.apache.hadoop.util.NativeCodeLoader -->
+ <class name="NativeCodeLoader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeCodeLoader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeCodeLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if native-hadoop code is loaded for this platform.
+
+ @return <code>true</code> if native-hadoop is loaded,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getLoadNativeLibraries" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+      <![CDATA[Return whether the native hadoop libraries, if present, can be used for this job.
+ @param jobConf job configuration
+
+ @return <code>true</code> if native hadoop libraries, if present, can be
+ used for this job; <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setLoadNativeLibraries"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="loadNativeLibraries" type="boolean"/>
+ <doc>
+      <![CDATA[Set whether the native hadoop libraries, if present, can be used for this job.
+
+ @param jobConf job configuration
+ @param loadNativeLibraries can native hadoop libraries be loaded]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[A helper to load the native hadoop code, i.e. libhadoop.so.
+ This handles the fallback to either the bundled libhadoop-Linux-i386-32.so
+ or the default java implementations where appropriate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.NativeCodeLoader -->
+ <!-- start class org.apache.hadoop.util.PlatformName -->
+ <class name="PlatformName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PlatformName"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPlatformName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the complete platform name as reported by the java-vm.
+ @return the complete platform name as reported by the java-vm.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[A helper class for getting build-info of the java-vm.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PlatformName -->
+ <!-- start class org.apache.hadoop.util.PrintJarMainClass -->
+ <class name="PrintJarMainClass" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PrintJarMainClass"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A micro-application that prints the main class name out of a jar file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PrintJarMainClass -->
+ <!-- start class org.apache.hadoop.util.PriorityQueue -->
+ <class name="PriorityQueue" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PriorityQueue"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="lessThan" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Determines the ordering of objects in this priority queue. Subclasses
+ must define this one method.]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="maxSize" type="int"/>
+ <doc>
+ <![CDATA[Subclass constructors must call this.]]>
+ </doc>
+ </method>
+ <method name="put"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Adds an Object to a PriorityQueue in log(size) time.
+ If one tries to add more objects than the maxSize given to initialize,
+ a RuntimeException (ArrayIndexOutOfBoundsException) is thrown.]]>
+ </doc>
+ </method>
+ <method name="insert" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Adds element to the PriorityQueue in log(size) time if either
+ the PriorityQueue is not full, or not lessThan(element, top()).
+ @param element
+ @return true if element is added, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="top" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the least element of the PriorityQueue in constant time.]]>
+ </doc>
+ </method>
+ <method name="pop" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes and returns the least element of the PriorityQueue in log(size)
+ time.]]>
+ </doc>
+ </method>
+ <method name="adjustTop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should be called when the Object at top changes values. Still log(n)
+ worst case, but it's at least twice as fast to <pre>
+ { pq.top().change(); pq.adjustTop(); }
+ </pre> instead of <pre>
+ { o = pq.pop(); o.change(); pq.push(o); }
+ </pre>]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of elements currently stored in the PriorityQueue.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes all entries from the PriorityQueue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A PriorityQueue maintains a partial ordering of its elements such that the
+ least element can always be found in constant time. Put()'s and pop()'s
+ require log(size) time.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PriorityQueue -->
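+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): a minimal PriorityQueue subclass;
+       only lessThan must be defined, and the constructor calls initialize as
+       the docs above require. The class name QueueDemo is hypothetical.
+
+       import org.apache.hadoop.util.PriorityQueue;
+
+       public class QueueDemo {
+         static class IntQueue extends PriorityQueue {
+           IntQueue(int maxSize) { initialize(maxSize); }
+           protected boolean lessThan(Object a, Object b) {
+             return ((Integer) a).intValue() < ((Integer) b).intValue();
+           }
+         }
+
+         public static void main(String[] args) {
+           IntQueue q = new IntQueue(8);
+           q.put(Integer.valueOf(4));
+           q.put(Integer.valueOf(1));
+           q.put(Integer.valueOf(7));
+           while (q.size() > 0) {
+             System.out.println(q.pop());  // prints 1, 4, 7
+           }
+         }
+       }
+  -->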
+ <!-- start class org.apache.hadoop.util.ProgramDriver -->
+ <class name="ProgramDriver" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ProgramDriver"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="mainClass" type="java.lang.Class"/>
+ <param name="description" type="java.lang.String"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+      <![CDATA[This is the method that adds classes to the repository.
+ @param name The name with which the class should be invoked
+ @param mainClass The class that you want to add to the repository
+ @param description The description of the class
+ @throws NoSuchMethodException
+ @throws SecurityException]]>
+ </doc>
+ </method>
+ <method name="driver"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[This is a driver for the example programs.
+ It looks at the first command line argument and tries to find an
+ example program with that name.
+ If it is found, it calls the main method in that class with the rest
+ of the command line arguments.
+ @param args The arguments from the user. args[0] is the command to run.
+ @throws NoSuchMethodException
+ @throws SecurityException
+ @throws IllegalAccessException
+ @throws IllegalArgumentException
+ @throws Throwable Anything thrown by the example program's main]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A driver that is used to run programs added to it]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ProgramDriver -->
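+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): registering a program with
+       ProgramDriver and dispatching on args[0]. The Hello class, the "hello"
+       token and the class name DriverDemo are hypothetical.
+
+       import org.apache.hadoop.util.ProgramDriver;
+
+       public class DriverDemo {
+         public static class Hello {
+           public static void main(String[] args) {
+             System.out.println("hello from the driver");
+           }
+         }
+
+         public static void main(String[] args) throws Throwable {
+           ProgramDriver pgd = new ProgramDriver();
+           pgd.addClass("hello", Hello.class, "Prints a greeting.");
+           // e.g. args = { "hello" } invokes Hello.main with the rest of args.
+           pgd.driver(args);
+         }
+       }
+  -->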
+ <!-- start class org.apache.hadoop.util.Progress -->
+ <class name="Progress" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Progress"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new root node.]]>
+ </doc>
+ </constructor>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a named node to the tree.]]>
+ </doc>
+ </method>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds a node to the tree.]]>
+ </doc>
+ </method>
+ <method name="startNextPhase"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Called during execution to move to the next phase at this level in the
+ tree.]]>
+ </doc>
+ </method>
+ <method name="phase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current sub-node executing.]]>
+ </doc>
+ </method>
+ <method name="complete"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Completes this node, moving the parent node to its next child.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progress" type="float"/>
+ <doc>
+ <![CDATA[Called during execution on a leaf node to set its progress.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the overall progress of the root.]]>
+ </doc>
+ </method>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Utility to assist with generation of progress reports. Applications build
+ a hierarchy of {@link Progress} instances, each modelling a phase of
+ execution. The root is constructed with {@link #Progress()}. Nodes for
+ sub-phases are created by calling {@link #addPhase()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Progress -->
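+  <!-- Illustrative usage sketch (not part of the generated API description;
+       assumes Hadoop 0.20 on the classpath): a two-phase progress tree. The
+       expected values assume the phases are equally weighted, so half of the
+       first of two phases reports as 0.25 overall. The class name ProgressDemo
+       is hypothetical.
+
+       import org.apache.hadoop.util.Progress;
+
+       public class ProgressDemo {
+         public static void main(String[] args) {
+           Progress root = new Progress();
+           Progress copy = root.addPhase("copy");
+           Progress sort = root.addPhase("sort");
+           copy.set(0.5f);                  // halfway through phase 1 of 2
+           System.out.println(root.get());  // expected: 0.25
+           copy.complete();                 // root moves on to "sort"
+           sort.set(1.0f);
+           System.out.println(root.get());  // expected: 1.0
+         }
+       }
+  -->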
+ <!-- start interface org.apache.hadoop.util.Progressable -->
+ <interface name="Progressable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Report progress to the Hadoop framework.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for reporting progress.
+
+ <p>Clients and/or applications can use the provided <code>Progressable</code>
+ to explicitly report progress to the Hadoop framework. This is especially
+ important for operations which take a significant amount of time since,
+ in lieu of the reported progress, the framework has to assume that an error
+ has occurred and time out the operation.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Progressable -->
+ <!-- start class org.apache.hadoop.util.QuickSort -->
+ <class name="QuickSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="QuickSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMaxDepth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="x" type="int"/>
+ <doc>
+ <![CDATA[Deepest recursion before giving up and doing a heapsort.
+ Returns 2 * ceil(log(n)).]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using quick sort.
+ {@inheritDoc} If the recursion depth falls below {@link #getMaxDepth},
+ then switch to {@link HeapSort}.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of QuickSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.QuickSort -->
+ <!-- start class org.apache.hadoop.util.ReflectionUtils -->
+ <class name="ReflectionUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReflectionUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theObject" type="java.lang.Object"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check and set 'configuration' if necessary.
+
+ @param theObject object for which to set configuration
+ @param conf Configuration]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create an object for the given class and initialize it from conf
+
+ @param theClass class of which an object is created
+ @param conf Configuration
+ @return a new object]]>
+ </doc>
+ </method>
+ <method name="setContentionTracing"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="boolean"/>
+ </method>
+ <method name="printThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.PrintWriter"/>
+ <param name="title" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Print all of the thread's information and stack traces.
+
+ @param stream the stream to write to
+ @param title a string title for the stack trace]]>
+ </doc>
+ </method>
+ <method name="logThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="title" type="java.lang.String"/>
+ <param name="minInterval" type="long"/>
+ <doc>
+ <![CDATA[Log the current thread stacks at INFO level.
+ @param log the logger that logs the stack trace
+ @param title a descriptive title for the call stacks
+      @param minInterval the minimum time since the stacks were last logged]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="T"/>
+ <doc>
+ <![CDATA[Return the correctly-typed {@link Class} of the given object.
+
+ @param o object whose correctly-typed <code>Class</code> is to be obtained
+ @return the correctly typed <code>Class</code> of the given object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[General reflection utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ReflectionUtils -->
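+  <!-- Usage sketch (illustrative; not part of the generated JDiff output).
+       newInstance creates the object reflectively and, via setConf, passes the
+       Configuration to instances that implement Configurable. "MyWritable" is
+       a hypothetical class name.
+
+         Configuration conf = new Configuration();
+         MyWritable w = (MyWritable) ReflectionUtils.newInstance(MyWritable.class, conf);
+  -->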
+ <!-- start class org.apache.hadoop.util.RunJar -->
+ <class name="RunJar" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RunJar"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="unJar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jarFile" type="java.io.File"/>
+ <param name="toDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unpack a jar file into a directory.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Run a Hadoop job jar. If the main class is not in the jar's manifest,
+ then it must be provided on the command line.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Run a Hadoop job jar.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.RunJar -->
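+  <!-- Usage sketch (illustrative; not part of the generated JDiff output).
+       RunJar.main backs the "bin/hadoop jar" command; when the jar manifest
+       has no Main-Class, the class name follows the jar on the command line.
+       The jar and class names below are hypothetical.
+
+         // equivalent to: bin/hadoop jar myjob.jar org.example.MyJob in out
+         RunJar.main(new String[] {"myjob.jar", "org.example.MyJob", "in", "out"});
+  -->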
+ <!-- start class org.apache.hadoop.util.ServletUtil -->
+ <class name="ServletUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ServletUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initHTML" return="java.io.PrintWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="response" type="javax.servlet.ServletResponse"/>
+ <param name="title" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initial HTML header]]>
+ </doc>
+ </method>
+ <method name="getParameter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.ServletRequest"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a parameter from a ServletRequest.
+ Return null if the parameter contains only white spaces.]]>
+ </doc>
+ </method>
+ <method name="htmlFooter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[HTML footer to be added to the JSPs.
+      @return the HTML footer.]]>
+ </doc>
+ </method>
+ <field name="HTML_TAIL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.util.ServletUtil -->
+ <!-- start class org.apache.hadoop.util.Shell -->
+ <class name="Shell" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param interval the minimum duration to wait before re-executing the
+ command.]]>
+ </doc>
+ </constructor>
+ <method name="getGROUPS_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's groups list]]>
+ </doc>
+ </method>
+ <method name="getGET_PERMISSION_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a Unix command to get permission information.]]>
+ </doc>
+ </method>
+ <method name="getUlimitMemoryCommand" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the Unix command for setting the maximum virtual memory available
+ to a given child process. This is only relevant when we are forking a
+ process from within the {@link org.apache.hadoop.mapred.Mapper} or the
+ {@link org.apache.hadoop.mapred.Reducer} implementations
+ e.g. <a href="{@docRoot}/org/apache/hadoop/mapred/pipes/package-summary.html">Hadoop Pipes</a>
+ or <a href="{@docRoot}/org/apache/hadoop/streaming/package-summary.html">Hadoop Streaming</a>.
+
+      It also checks that we are running on a *nix platform; otherwise
+      (e.g. on Cygwin/Windows) it returns <code>null</code>.
+      @param job job configuration
+      @return a <code>String[]</code> with the ulimit command arguments, or
+              <code>null</code> if we are running on a non-*nix platform or
+              if the limit is unspecified.]]>
+ </doc>
+ </method>
+ <method name="setEnvironment"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="env" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[set the environment for the command
+ @param env Mapping of environment variables]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[set the working directory
+ @param dir The directory where the command would be executed]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[check to see if a command needs to be executed and execute if needed]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return an array containing the command name & its parameters]]>
+ </doc>
+ </method>
+ <method name="parseExecResult"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the execution result]]>
+ </doc>
+ </method>
+ <method name="getProcess" return="java.lang.Process"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the current sub-process executing the given command
+ @return process executing the command]]>
+ </doc>
+ </method>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the exit code
+ @return the exit code of the process]]>
+ </doc>
+ </method>
+ <method name="execCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Static method to execute a shell command.
+      Covers most of the simple cases without requiring the user to subclass
+      <code>Shell</code>.
+ @param cmd shell command to execute.
+ @return the output of the executed command.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USER_NAME_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's name]]>
+ </doc>
+ </field>
+ <field name="SET_PERMISSION_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set permission]]>
+ </doc>
+ </field>
+ <field name="SET_OWNER_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set owner]]>
+ </doc>
+ </field>
+ <field name="SET_GROUP_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WINDOWS" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set to true on Windows platforms]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A base class for running a Unix command.
+
+      <code>Shell</code> can be used to run Unix commands like <code>du</code> or
+      <code>df</code>. It also offers facilities to gate commands by
+      time intervals.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell -->
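+  <!-- Usage sketch (illustrative; not part of the generated JDiff output).
+       A minimal Shell subclass supplies getExecString and parseExecResult;
+       run() handles forking the process. "UptimeCommand" is hypothetical,
+       and the java.io imports are omitted for brevity.
+
+         class UptimeCommand extends Shell {
+           private String output;
+           protected String[] getExecString() { return new String[] {"uptime"}; }
+           protected void parseExecResult(BufferedReader lines) throws IOException {
+             output = lines.readLine();
+           }
+           String uptime() throws IOException { run(); return output; }
+         }
+  -->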
+ <!-- start class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <class name="Shell.ExitCodeException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ExitCodeException" type="int, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is an IOException with exit code added.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <!-- start class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
+ <class name="Shell.ShellCommandExecutor" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File, java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the shell command.]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getOutput" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the output of the shell command.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple shell command executor.
+
+      <code>ShellCommandExecutor</code> should be used in cases where the output
+      of the command needs no explicit parsing and where the command, working
+      directory and the environment remain unchanged. The output of the command
+      is stored as-is and is expected to be small.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
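+  <!-- Usage sketch (illustrative; not part of the generated JDiff output).
+       For one-off commands, ShellCommandExecutor avoids subclassing Shell:
+
+         ShellCommandExecutor exec =
+             new ShellCommandExecutor(new String[] {"df", "-k", "/tmp"});
+         exec.execute();                 // throws IOException on failure
+         String out = exec.getOutput();  // raw stdout, stored as-is
+         // simpler still: Shell.execCommand(new String[] {"uname", "-a"})
+  -->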
+ <!-- start class org.apache.hadoop.util.StringUtils -->
+ <class name="StringUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StringUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stringifyException" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Make a string representation of the exception.
+ @param e The exception to stringify
+ @return A string with exception name and call stack.]]>
+ </doc>
+ </method>
+ <method name="simpleHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fullHostname" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Given a full hostname, return the word up to the first dot.
+      @param fullHostname the full hostname
+      @return the hostname up to the first dot]]>
+ </doc>
+ </method>
+ <method name="humanReadableInt" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="number" type="long"/>
+ <doc>
+      <![CDATA[Given an integer, return a string that is in an approximate but
+      human-readable format.
+      It uses the suffixes 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3.
+ @param number the number to format
+ @return a human readable form of the integer]]>
+ </doc>
+ </method>
+ <method name="formatPercent" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="done" type="double"/>
+ <param name="digits" type="int"/>
+ <doc>
+ <![CDATA[Format a percentage for presentation to the user.
+ @param done the percentage to format (0.0 to 1.0)
+ @param digits the number of digits past the decimal point
+ @return a string representation of the percentage]]>
+ </doc>
+ </method>
+ <method name="arrayToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strs" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Given an array of strings, return a comma-separated list of its elements.
+ @param strs Array of strings
+ @return Empty string if strs.length is 0, comma separated list of strings
+ otherwise]]>
+ </doc>
+ </method>
+ <method name="byteToHexString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+      <![CDATA[Given an array of bytes, convert the bytes to their hex string
+      representation.
+      @param bytes the bytes to convert
+      @return hex string representation of the byte array]]>
+ </doc>
+ </method>
+ <method name="hexStringToByte" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Given a hex string, return the byte array corresponding to the
+      string.
+      @param hex the hex string
+      @return the byte array corresponding to the given hex string. The size
+              of the byte array is therefore hex.length()/2]]>
+ </doc>
+ </method>
+ <method name="uriToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uris" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[@param uris]]>
+ </doc>
+ </method>
+ <method name="stringToURI" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param str]]>
+ </doc>
+ </method>
+ <method name="stringToPath" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param str]]>
+ </doc>
+ </method>
+ <method name="formatTimeDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+      <![CDATA[Given a finish and start time in milliseconds, returns a
+      String in the format "Xhrs, Ymins, Zsec" for the time difference between
+      the two times.
+      If the finish time comes before the start time, negative values of X, Y
+      and Z are returned.
+
+ @param finishTime finish time
+ @param startTime start time]]>
+ </doc>
+ </method>
+ <method name="getFormattedTimeWithDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dateFormat" type="java.text.DateFormat"/>
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+      <![CDATA[Formats time in ms and appends the difference (finishTime - startTime)
+      as returned by formatTimeDiff().
+      If the finish time is 0, an empty string is returned; if the start time
+      is 0, the difference is not appended to the return value.
+      @param dateFormat date format to use
+      @param finishTime finish time
+ @param startTime start time
+ @return formatted value.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Returns an array of strings.
+      @param str the comma separated string values
+      @return an array of the comma separated string values]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a collection of strings.
+      @param str comma separated string values
+ @return an <code>ArrayList</code> of string values]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Split a string using the default separator.
+      @param str a string that may contain escaped separators
+      @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="separator" type="char"/>
+ <doc>
+      <![CDATA[Split a string using the given separator.
+      @param str a string that may contain escaped separators
+      @param escapeChar a char that can be used to escape the separator
+      @param separator a separator char
+      @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Escape commas in the string using the default escape char
+ @param str a string
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Escape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the char to be escaped
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Unescape commas in the string using the default escape char
+ @param str a string
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Unescape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the escaped char
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="getHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return the hostname without throwing an exception.
+      @return the hostname]]>
+ </doc>
+ </method>
+ <method name="startupShutdownMessage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <param name="args" type="java.lang.String[]"/>
+ <param name="LOG" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Print a log message for starting up and shutting down
+ @param clazz the class of the server
+ @param args arguments
+ @param LOG the target log object]]>
+ </doc>
+ </method>
+ <field name="COMMA" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ESCAPE_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[General string utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.StringUtils -->
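+  <!-- Usage sketch (illustrative; not part of the generated JDiff output).
+       escapeString/split/unEscapeString round-trip values that contain the
+       separator; split keeps the escapes, unEscapeString removes them.
+
+         String packed = StringUtils.escapeString("a,b") + ",c"; // "a\,b,c"
+         String[] parts = StringUtils.split(packed);             // ["a\,b", "c"]
+         String first = StringUtils.unEscapeString(parts[0]);    // "a,b"
+  -->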
+ <!-- start interface org.apache.hadoop.util.Tool -->
+ <interface name="Tool" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Execute the command with the given arguments.
+
+ @param args command specific arguments.
+ @return exit code.
+ @throws Exception]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A tool interface that supports handling of generic command-line options.
+
+      <p><code>Tool</code> is the standard for any Map-Reduce tool/application.
+ The tool/application should delegate the handling of
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ standard command-line options</a> to {@link ToolRunner#run(Tool, String[])}
+ and only handle its custom arguments.</p>
+
+ <p>Here is how a typical <code>Tool</code> is implemented:</p>
+ <p><blockquote><pre>
+ public class MyApp extends Configured implements Tool {
+
+ public int run(String[] args) throws Exception {
+ // <code>Configuration</code> processed by <code>ToolRunner</code>
+ Configuration conf = getConf();
+
+ // Create a JobConf using the processed <code>conf</code>
+ JobConf job = new JobConf(conf, MyApp.class);
+
+ // Process custom command-line options
+ Path in = new Path(args[1]);
+ Path out = new Path(args[2]);
+
+ // Specify various job-specific parameters
+ job.setJobName("my-app");
+ job.setInputPath(in);
+ job.setOutputPath(out);
+ job.setMapperClass(MyApp.MyMapper.class);
+ job.setReducerClass(MyApp.MyReducer.class);
+
+            // Submit the job, then poll for progress until the job is complete
+            JobClient.runJob(job);
+            return 0;
+          }
+
+ public static void main(String[] args) throws Exception {
+ // Let <code>ToolRunner</code> handle generic command-line options
+            int res = ToolRunner.run(new Configuration(), new MyApp(), args);
+
+ System.exit(res);
+ }
+ }
+ </pre></blockquote></p>
+
+ @see GenericOptionsParser
+ @see ToolRunner]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Tool -->
+ <!-- start class org.apache.hadoop.util.ToolRunner -->
+ <class name="ToolRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ToolRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the given <code>Tool</code> by {@link Tool#run(String[])}, after
+ parsing with the given generic arguments. Uses the given
+ <code>Configuration</code>, or builds one if null.
+
+ Sets the <code>Tool</code>'s configuration with the possibly modified
+ version of the <code>conf</code>.
+
+ @param conf <code>Configuration</code> for the <code>Tool</code>.
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the <code>Tool</code> with its <code>Configuration</code>.
+
+ Equivalent to <code>run(tool.getConf(), tool, args)</code>.
+
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+      <![CDATA[Prints generic command-line arguments and usage information.
+
+ @param out stream to write usage information to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility to help run {@link Tool}s.
+
+ <p><code>ToolRunner</code> can be used to run classes implementing
+ <code>Tool</code> interface. It works in conjunction with
+ {@link GenericOptionsParser} to parse the
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ generic hadoop command line arguments</a> and modifies the
+ <code>Configuration</code> of the <code>Tool</code>. The
+ application-specific options are passed along without being modified.
+ </p>
+
+ @see Tool
+ @see GenericOptionsParser]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ToolRunner -->
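+  <!-- Usage sketch (illustrative; not part of the generated JDiff output).
+       ToolRunner consumes generic options such as "-D key=value" before the
+       Tool sees its arguments, so MyApp (the Tool sketched above) receives
+       only {"in", "out"} here:
+
+         String[] args = {"-D", "mapred.reduce.tasks=2", "in", "out"};
+         int rc = ToolRunner.run(new Configuration(), new MyApp(), args);
+  -->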
+ <!-- start class org.apache.hadoop.util.VersionInfo -->
+ <class name="VersionInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Hadoop version.
+      @return the Hadoop version string, e.g. "0.6.3-dev"]]>
+ </doc>
+ </method>
+ <method name="getRevision" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the Subversion revision number for the root directory.
+      @return the revision number, e.g. "451451"]]>
+ </doc>
+ </method>
+ <method name="getDate" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The date that Hadoop was compiled.
+ @return the compilation date in unix date format]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The user that compiled Hadoop.
+ @return the username of the user]]>
+ </doc>
+ </method>
+ <method name="getUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the Subversion URL for the root Hadoop directory.]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the buildVersion which includes version,
+ revision, user and date.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[This class finds the package info for Hadoop and the HadoopVersionAnnotation
+ information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.VersionInfo -->
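+  <!-- Usage sketch (illustrative; not part of the generated JDiff output).
+       VersionInfo exposes build metadata; its main method backs the
+       "bin/hadoop version" command.
+
+         System.out.println("Hadoop " + VersionInfo.getVersion()
+             + " r" + VersionInfo.getRevision()
+             + ", built by " + VersionInfo.getUser()
+             + " on " + VersionInfo.getDate());
+  -->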
+ <!-- start class org.apache.hadoop.util.XMLUtils -->
+ <class name="XMLUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="XMLUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="transform"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="styleSheet" type="java.io.InputStream"/>
+ <param name="xml" type="java.io.InputStream"/>
+ <param name="out" type="java.io.Writer"/>
+ <exception name="TransformerConfigurationException" type="javax.xml.transform.TransformerConfigurationException"/>
+ <exception name="TransformerException" type="javax.xml.transform.TransformerException"/>
+ <doc>
+ <![CDATA[Transform input xml given a stylesheet.
+
+ @param styleSheet the style-sheet
+ @param xml input xml data
+ @param out output
+ @throws TransformerConfigurationException
+ @throws TransformerException]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[General XML utilities.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.XMLUtils -->
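+  <!-- Usage sketch (illustrative; not part of the generated JDiff output).
+       Applying an XSLT stylesheet with XMLUtils.transform; the file names
+       are hypothetical, and the java.io imports are omitted.
+
+         InputStream style = new FileInputStream("report.xsl");
+         InputStream xml = new FileInputStream("data.xml");
+         Writer out = new OutputStreamWriter(System.out);
+         XMLUtils.transform(style, xml, out);
+         out.flush();
+  -->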
+</package>
+
+</api>
diff --git a/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.19.0.xml b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.19.0.xml
new file mode 100644
index 0000000000..557ac3cc59
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.19.0.xml
@@ -0,0 +1,43972 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Mon Nov 24 23:31:18 UTC 2008 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="hadoop 0.19.0"
+ jdversion="1.1.1">
+
+<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/hadoopqa/tools/jdiff/latest/jdiff.jar:/home/hadoopqa/tools/jdiff/latest/xerces.jar -classpath /home/ndaley/hadoop/branch-0.19/build/classes:/home/ndaley/hadoop/branch-0.19/lib/commons-cli-2.0-SNAPSHOT.jar:/home/ndaley/hadoop/branch-0.19/lib/commons-codec-1.3.jar:/home/ndaley/hadoop/branch-0.19/lib/commons-httpclient-3.0.1.jar:/home/ndaley/hadoop/branch-0.19/lib/commons-logging-1.0.4.jar:/home/ndaley/hadoop/branch-0.19/lib/commons-logging-api-1.0.4.jar:/home/ndaley/hadoop/branch-0.19/lib/commons-net-1.4.1.jar:/home/ndaley/hadoop/branch-0.19/lib/hsqldb-1.8.0.10.jar:/home/ndaley/hadoop/branch-0.19/lib/jets3t-0.6.1.jar:/home/ndaley/hadoop/branch-0.19/lib/jetty-5.1.4.jar:/home/ndaley/hadoop/branch-0.19/lib/jetty-ext/commons-el.jar:/home/ndaley/hadoop/branch-0.19/lib/jetty-ext/jasper-compiler.jar:/home/ndaley/hadoop/branch-0.19/lib/jetty-ext/jasper-runtime.jar:/home/ndaley/hadoop/branch-0.19/lib/jetty-ext/jsp-api.jar:/home/ndaley/hadoop/branch-0.19/lib/junit-3.8.1.jar:/home/ndaley/hadoop/branch-0.19/lib/kfs-0.2.0.jar:/home/ndaley/hadoop/branch-0.19/lib/log4j-1.2.15.jar:/home/ndaley/hadoop/branch-0.19/lib/oro-2.0.8.jar:/home/ndaley/hadoop/branch-0.19/lib/servlet-api.jar:/home/ndaley/hadoop/branch-0.19/lib/slf4j-api-1.4.3.jar:/home/ndaley/hadoop/branch-0.19/lib/slf4j-log4j12-1.4.3.jar:/home/ndaley/hadoop/branch-0.19/lib/xmlenc-0.52.jar:/home/ndaley/hadoop/branch-0.19/conf:/home/ndaley/tools/ant/latest/lib/ant-launcher.jar:/home/ndaley/tools/ant/latest/lib/ant-antlr.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-bcel.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-bsf.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-log4j.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-oro.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-regexp.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-resolver.jar:/home/ndaley/tools/ant/latest/lib/ant-commons-logging.jar:/home/ndaley/tools/ant/latest/lib/ant-commons-net.jar:/home/ndaley/tools/ant/latest/lib/ant-jai.jar:/home/ndaley/tools/ant/latest/lib/ant-javamail.jar:/home/ndaley/tools/ant/latest/lib/ant-jdepend.jar:/home/ndaley/tools/ant/latest/lib/ant-jmf.jar:/home/ndaley/tools/ant/latest/lib/ant-jsch.jar:/home/ndaley/tools/ant/latest/lib/ant-junit.jar:/home/ndaley/tools/ant/latest/lib/ant-netrexx.jar:/home/ndaley/tools/ant/latest/lib/ant-nodeps.jar:/home/ndaley/tools/ant/latest/lib/ant-starteam.jar:/home/ndaley/tools/ant/latest/lib/ant-stylebook.jar:/home/ndaley/tools/ant/latest/lib/ant-swing.jar:/home/ndaley/tools/ant/latest/lib/ant-testutil.jar:/home/ndaley/tools/ant/latest/lib/ant-trax.jar:/home/ndaley/tools/ant/latest/lib/ant-weblogic.jar:/home/ndaley/tools/ant/latest/lib/ant.jar:/home/ndaley/tools/ant/latest/lib/xercesImpl.jar:/home/ndaley/tools/ant/latest/lib/xml-apis.jar:/nfs/ystools/vol/ystools/releng/build/Linux_2.6_rh4_x86_64/tools/java/jdk1.6.0_i586/lib/tools.jar -sourcepath /home/ndaley/hadoop/branch-0.19/src/core:/home/ndaley/hadoop/branch-0.19/src/mapred:/home/ndaley/hadoop/branch-0.19/src/tools -apidir /home/ndaley/hadoop/branch-0.19/docs/jdiff -apiname hadoop 0.19.0 -->
+<package name="org.apache.hadoop">
+ <!-- start class org.apache.hadoop.HadoopVersionAnnotation -->
+ <class name="HadoopVersionAnnotation" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.annotation.Annotation"/>
+ <doc>
+ <![CDATA[A package attribute that captures the version of Hadoop that was compiled.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.HadoopVersionAnnotation -->
+</package>
+<package name="org.apache.hadoop.conf">
+ <!-- start interface org.apache.hadoop.conf.Configurable -->
+ <interface name="Configurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration to be used by this object.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the configuration used by this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.conf.Configurable -->
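+  <!-- Usage sketch (illustrative; not part of the generated JDiff output).
+       A minimal Configurable implementation stores the Configuration it is
+       given; ReflectionUtils.newInstance calls setConf automatically.
+       "MyConfigurable" is a hypothetical class name.
+
+         public class MyConfigurable implements Configurable {
+           private Configuration conf;
+           public void setConf(Configuration conf) { this.conf = conf; }
+           public Configuration getConf() { return conf; }
+         }
+  -->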
+ <!-- start class org.apache.hadoop.conf.Configuration -->
+ <class name="Configuration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"/>
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration where the behavior of reading from the default
+ resources can be turned off.
+
+ If the parameter {@code loadDefaults} is false, the new instance
+ will not load resources from the default files.
+ @param loadDefaults specifies whether to load from the default files]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration with the same settings cloned from another.
+
+ @param other the configuration from which to clone settings.]]>
+ </doc>
+ </constructor>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param name resource to be added, the classpath is examined for a file
+ with that name.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="url" type="java.net.URL"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param url url of the resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param file file-path of resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param in InputStream to deserialize the object from.]]>
+ </doc>
+ </method>
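+    <!-- Usage sketch (illustrative; not part of the generated JDiff output).
+         Resources are layered in the order added; later resources override
+         earlier ones unless a property was marked final. The file names are
+         hypothetical.
+
+           Configuration conf = new Configuration();  // loads the defaults
+           conf.addResource("my-default.xml");        // looked up on the classpath
+           conf.addResource(new Path("/etc/hadoop/my-site.xml")); // local file
+    -->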
+ <method name="reloadConfiguration"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reload configuration from previously added resources.
+
+      This method will clear all the configuration read from the added
+      resources and all final parameters. The resources will be read again
+      before the values are next accessed. Values that are added
+      via set methods will overlay values read from the resources.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, <code>null</code> if
+ no such property exists.
+
+ Values are processed for <a href="#VariableExpansion">variable expansion</a>
+ before being returned.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="getRaw" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, without doing
+ <a href="#VariableExpansion">variable expansion</a>.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the <code>value</code> of the <code>name</code> property.
+
+ @param name property name.
+ @param value property value.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property. If no such property
+ exists, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value, or <code>defaultValue</code> if the property
+ doesn't exist.]]>
+ </doc>
+ </method>
+ <method name="getInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="int"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>.
+
+ If no such property exists, or if the specified value is not a valid
+ <code>int</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as an <code>int</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>.
+
+ @param name property name.
+ @param value <code>int</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="long"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>long</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>long</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>.
+
+ @param name property name.
+ @param value <code>long</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="float"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>float</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>float</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getBoolean" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="boolean"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>boolean</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>boolean</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setBoolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>.
+
+ @param name property name.
+ @param value <code>boolean</code> value of the property.]]>
+ </doc>
+ </method>
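+    <!-- Usage sketch (illustrative; not part of the generated JDiff output).
+         The typed getters fall back to the supplied default when the property
+         is missing or unparseable; "conf" is a Configuration as above and the
+         property names are hypothetical.
+
+           conf.setInt("my.buffer.size", 4096);
+           int size = conf.getInt("my.buffer.size", 1024);             // 4096
+           boolean on = conf.getBoolean("my.feature.enabled", false);  // false
+    -->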
+ <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Parse the given attribute as a set of integer ranges
+ @param name the attribute name
+ @param defaultValue the default value if it is not set
+ @return a new set of ranges from the configured value]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ a collection of <code>String</code>s.
+      If no such property is specified, then an empty collection is returned.
+ <p>
+ This is an optimized version of {@link #getStrings(String)}
+
+ @param name property name.
+ @return property value as a collection of <code>String</code>s.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then <code>null</code> is returned.
+
+ @param name property name.
+ @return property value as an array of <code>String</code>s,
+ or <code>null</code>.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+      If no such property is specified, then the default value is returned.
+
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of <code>String</code>s,
+ or default value.]]>
+ </doc>
+ </method>
+ <method name="setStrings"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="values" type="java.lang.String[]"/>
+ <doc>
+      <![CDATA[Set the array of string values for the <code>name</code> property
+      as comma delimited values.
+
+ @param name property name.
+ @param values The values]]>
+ </doc>
+ </method>
+ <method name="getClassByName" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Load a class by name.
+
+ @param name the class name.
+ @return the class object.
+ @throws ClassNotFoundException if the class is not found.]]>
+ </doc>
+ </method>
+ <method name="getClasses" return="java.lang.Class[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class[]"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property
+ as an array of <code>Class</code>.
+ The value of the property specifies a list of comma separated class names.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the property name.
+ @param defaultValue default value.
+ @return property value as a <code>Class[]</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;? extends U&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends U&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;U&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>
+ implementing the interface specified by <code>xface</code>.
+
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ An exception is thrown if the returned class does not implement the named
+ interface.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @param xface the interface implemented by the named class.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+      <![CDATA[Set the value of the <code>name</code> property to the name of
+      <code>theClass</code>, implementing the given interface <code>xface</code>.
+
+ An exception is thrown if <code>theClass</code> does not implement the
+ interface <code>xface</code>.
+
+ @param name property name.
+ @param theClass property value.
+ @param xface the interface implemented by the named class.]]>
+ </doc>
+ </method>
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
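+ <!-- Illustrative sketch, not part of the generated API description:
+      getLocalPath spreading files across several local directories. The
+      property key "example.local.dirs" and both directory values are
+      hypothetical placeholders.
+
+      Configuration conf = new Configuration();
+      conf.set("example.local.dirs", "/disk1/tmp,/disk2/tmp");
+      // One of the two directories is chosen from the hash code of the
+      // relative path, and created if it does not yet exist.
+      Path p = conf.getLocalPath("example.local.dirs", "job_0001/part-0");
+ -->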
+ <method name="getFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getResource" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the {@link URL} for the named resource.
+
+ @param name resource name.
+ @return the url for the named resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get an input stream attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return an input stream attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsReader" return="java.io.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a {@link Reader} attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return a reader attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of keys in the configuration.
+
+ @return number of keys in the configuration.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clears all keys from the configuration.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code>
+ key-value pairs in the configuration.
+
+ @return an iterator over the entries.]]>
+ </doc>
+ </method>
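+ <!-- Illustrative sketch, not part of the generated API description:
+      walking all key/value pairs. Assuming Configuration implements
+      Iterable<Map.Entry<String, String>>, as the iterator() signature
+      above indicates, a for-each loop suffices.
+
+      Configuration conf = new Configuration();
+      for (Map.Entry<String, String> entry : conf) {
+        System.out.println(entry.getKey() + "=" + entry.getValue());
+      }
+ -->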
+ <method name="writeXml"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write out the non-default properties in this configuration to the given
+ {@link OutputStream}.
+
+ @param out the output stream to write to.]]>
+ </doc>
+ </method>
+ <method name="getClassLoader" return="java.lang.ClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link ClassLoader} for this job.
+
+ @return the correct class loader.]]>
+ </doc>
+ </method>
+ <method name="setClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="classLoader" type="java.lang.ClassLoader"/>
+ <doc>
+ <![CDATA[Set the class loader that will be used to load the various objects.
+
+ @param classLoader the new class loader.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setQuietMode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="quietmode" type="boolean"/>
+ <doc>
+ <![CDATA[Set the quietness-mode.
+
+ In quiet-mode, error and informational messages might not be logged.
+
+ @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
+ to turn it off.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[For debugging. List non-default properties to the terminal and exit.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Provides access to configuration parameters.
+
+ <h4 id="Resources">Resources</h4>
+
+ <p>Configurations are specified by resources. A resource contains a set of
+ name/value pairs as XML data. Each resource is named by either a
+ <code>String</code> or by a {@link Path}. If named by a <code>String</code>,
+ then the classpath is examined for a file with that name. If named by a
+ <code>Path</code>, then the local filesystem is examined directly, without
+ referring to the classpath.
+
+ <p>Unless explicitly turned off, Hadoop by default specifies two
+ resources, loaded in-order from the classpath: <ol>
+ <li><tt><a href="{@docRoot}/../hadoop-default.html">hadoop-default.xml</a>
+ </tt>: Read-only defaults for hadoop.</li>
+ <li><tt>hadoop-site.xml</tt>: Site-specific configuration for a given hadoop
+ installation.</li>
+ </ol>
+ Applications may add additional resources, which are loaded
+ subsequent to these resources in the order they are added.
+
+ <h4 id="FinalParams">Final Parameters</h4>
+
+ <p>Configuration parameters may be declared <i>final</i>.
+ Once a resource declares a value final, no subsequently-loaded
+ resource can alter that value.
+ For example, one might define a final parameter with:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;dfs.client.buffer.dir&lt;/name&gt;
+ &lt;value&gt;/tmp/hadoop/dfs/client&lt;/value&gt;
+ <b>&lt;final&gt;true&lt;/final&gt;</b>
+ &lt;/property&gt;</pre></tt>
+
+ Administrators typically define parameters as final in
+ <tt>hadoop-site.xml</tt> for values that user applications may not alter.
+
+ <h4 id="VariableExpansion">Variable Expansion</h4>
+
+ <p>Value strings are first processed for <i>variable expansion</i>. The
+ available properties are:<ol>
+ <li>Other properties defined in this Configuration; and, if a name is
+ undefined here,</li>
+ <li>Properties in {@link System#getProperties()}.</li>
+ </ol>
+
+ <p>For example, if a configuration resource contains the following property
+ definitions:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;basedir&lt;/name&gt;
+ &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
+ &lt;/property&gt;
+
+ &lt;property&gt;
+ &lt;name&gt;tempdir&lt;/name&gt;
+ &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt;
+ &lt;/property&gt;</pre></tt>
+
+ When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
+ will be resolved to another property in this Configuration, while
+ <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
+ of the System property with that name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration -->
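+ <!-- Illustrative sketch, not part of the generated API description: the
+      variable expansion described above, driven from code rather than an
+      XML resource. The property names are the ones used in the example.
+
+      Configuration conf = new Configuration();
+      conf.set("basedir", "/user/${user.name}");
+      conf.set("tempdir", "${basedir}/tmp");
+      // ${basedir} resolves against this Configuration; ${user.name} then
+      // falls through to System.getProperties(). For the user "alice" the
+      // call below would return "/user/alice/tmp".
+      String tempdir = conf.get("tempdir");
+ -->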
+ <!-- start class org.apache.hadoop.conf.Configuration.IntegerRanges -->
+ <class name="Configuration.IntegerRanges" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Configuration.IntegerRanges"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Configuration.IntegerRanges" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isIncluded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Is the given value in the set of ranges?
+ @param value the value to check
+ @return true if the value falls in one of the ranges]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A class that represents a set of positive integer ranges. It parses
+ strings of the form: "2-3,5,7-" where ranges are separated by comma and
+ the lower/upper bounds are separated by dash. Either the lower or upper
+ bound may be omitted, meaning all values up to or beyond the remaining bound. So the string
+ above means 2, 3, 5, and 7, 8, 9, ...]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration.IntegerRanges -->
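+ <!-- Illustrative sketch, not part of the generated API description:
+      parsing the range string quoted in the class comment above.
+
+      Configuration.IntegerRanges ranges =
+          new Configuration.IntegerRanges("2-3,5,7-");
+      ranges.isIncluded(3);   // true: inside 2-3
+      ranges.isIncluded(4);   // false: in no range
+      ranges.isIncluded(100); // true: 7- is open-ended upward
+ -->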
+ <!-- start class org.apache.hadoop.conf.Configured -->
+ <class name="Configured" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Configured"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configured" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configured -->
+</package>
+<package name="org.apache.hadoop.filecache">
+ <!-- start class org.apache.hadoop.filecache.DistributedCache -->
+ <class name="DistributedCache" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedCache"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copied from the {@link FileSystem} now.
+
+ @param cache the cache to be localized; this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache Dir where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred is
+ returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="honorSymLinkConf" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copied from the {@link FileSystem} now.
+
+ @param cache the cache to be localized; this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache Dir where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred is
+ returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @param honorSymLinkConf if this is false, then the symlinks are not
+ created even if conf says so (this is required for an optimization in task
+ launches)
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copied from the {@link FileSystem} now.
+
+ @param cache the cache to be localized; this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache Dir where you want to localize the files/archives
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred
+ is returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="releaseCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is the opposite of getLocalCache. When you are done
+ using the cache, you need to release it
+ @param cache The cache URI to be released
+ @param conf configuration which contains the filesystem the cache
+ is contained in.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeRelative" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTimestamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="cache" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns mtime of a given cache file on hdfs.
+ @param conf configuration
+ @param cache cache file
+ @return mtime of a given cache file on hdfs
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createAllSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="jobCacheDir" type="java.io.File"/>
+ <param name="workDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method creates symlinks for all files in a given dir in another directory
+ @param conf the configuration
+ @param jobCacheDir the target directory for creating symlinks
+ @param workDir the directory in which the symlinks are created
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setCacheArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archives" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of archives
+ @param archives The list of archives that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="setCacheFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of files
+ @param files The list of files that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="getCacheArchives" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache archives set in the Configuration
+ @param conf The configuration which contains the archives
+ @return A URI array of the caches set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCacheFiles" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache files set in the Configuration
+ @param conf The configuration which contains the files
+ @return A URI array of the files set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheArchives" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized caches
+ @param conf Configuration that contains the localized archives
+ @return A path array of localized caches
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized files
+ @param conf Configuration that contains the localized files
+ @return A path array of localized files
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getArchiveTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the archives
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFileTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the files
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setArchiveTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamp of the archives to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of archives.
+ The order should be the same as the order in which the archives are added.]]>
+ </doc>
+ </method>
+ <method name="setFileTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamp of the files to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of files.
+ The order should be the same as the order in which the files are added.]]>
+ </doc>
+ </method>
+ <method name="setLocalArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized archives
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+ </doc>
+ </method>
+ <method name="setLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized files
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+ </doc>
+ </method>
+ <method name="addCacheArchive"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add an archive to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addCacheFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add a file to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addFileToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a file path to the current set of classpath entries. It adds the file
+ to the cache as well.
+
+ @param file Path of the file to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getFileClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the file entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="addArchiveToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archive" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an archive path to the current set of classpath entries. It adds the
+ archive to the cache as well.
+
+ @param archive Path of the archive to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getArchiveClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the archive entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method allows you to create symlinks in the current working directory
+ of the task to all the cache files/archives
+ @param conf the jobconf]]>
+ </doc>
+ </method>
+ <method name="getSymlink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method checks to see if symlinks are to be created for the
+ localized cache files in the current working directory
+ @param conf the jobconf
+ @return true if symlinks are to be created; false otherwise]]>
+ </doc>
+ </method>
+ <method name="checkURIs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uriFiles" type="java.net.URI[]"/>
+ <param name="uriArchives" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[This method checks if there is a conflict in the fragment names
+ of the uris. Also makes sure that each uri has a fragment. It
+ is only to be called if you want to create symlinks for
+ the various archives and files.
+ @param uriFiles the array of file URIs
+ @param uriArchives the array of archive URIs]]>
+ </doc>
+ </method>
+ <method name="purgeCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clear the entire contents of the cache and delete the backing files. This
+ should only be used when the server is reinitializing, because the users
+ are going to lose their files.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Distribute application-specific large, read-only files efficiently.
+
+ <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ framework to cache files (text, archives, jars etc.) needed by applications.
+ </p>
+
+ <p>Applications specify the files to be cached via urls (hdfs:// or http://)
+ in the {@link org.apache.hadoop.mapred.JobConf}.
+ The <code>DistributedCache</code> assumes that the
+ files specified via hdfs:// urls are already present on the
+ {@link FileSystem} at the path specified by the url.</p>
+
+ <p>The framework will copy the necessary files on to the slave node before
+ any tasks for the job are executed on that node. Its efficiency stems from
+ the fact that the files are only copied once per job and the ability to
+ cache archives which are un-archived on the slaves.</p>
+
+ <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ data/text files and/or more complex types such as archives, jars etc.
+ Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes.
+ Jars may be optionally added to the classpath of the tasks, a rudimentary
+ software distribution mechanism. Files have execution permissions.
+ Optionally users can also direct it to symlink the distributed cache file(s)
+ into the working directory of the task.</p>
+
+ <p><code>DistributedCache</code> tracks modification timestamps of the cache
+ files. Clearly the cache files should not be modified by the application
+ or externally while the job is executing.</p>
+
+ <p>Here is an illustrative example on how to use the
+ <code>DistributedCache</code>:</p>
+ <p><blockquote><pre>
+ // Setting up the cache for the application
+
+ 1. Copy the requisite files to the <code>FileSystem</code>:
+
+ $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
+ $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
+ $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+ $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
+ $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
+ $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
+
+ 2. Setup the application's <code>JobConf</code>:
+
+ JobConf job = new JobConf();
+ DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
+ job);
+ DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
+ DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz"), job);
+
+ 3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
+ or {@link org.apache.hadoop.mapred.Reducer}:
+
+ public static class MapClass extends MapReduceBase
+ implements Mapper&lt;K, V, K, V&gt; {
+
+ private Path[] localArchives;
+ private Path[] localFiles;
+
+ public void configure(JobConf job) {
+ // Get the cached archives/files
+ localArchives = DistributedCache.getLocalCacheArchives(job);
+ localFiles = DistributedCache.getLocalCacheFiles(job);
+ }
+
+ public void map(K key, V value,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Use data from the cached archives/files here
+ // ...
+ // ...
+ output.collect(k, v);
+ }
+ }
+
+ </pre></blockquote></p>
+
+ @see org.apache.hadoop.mapred.JobConf
+ @see org.apache.hadoop.mapred.JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.filecache.DistributedCache -->
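+ <!-- Illustrative sketch, not part of the generated API description: using
+      a URI fragment plus createSymlink so a cached file appears under a
+      friendly name in the task's working directory. The paths are
+      hypothetical placeholders.
+
+      JobConf job = new JobConf();
+      // The #lookup fragment names the symlink that will be created once
+      // createSymlink turns the feature on.
+      DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup"), job);
+      DistributedCache.createSymlink(job);
+ -->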
+</package>
+<package name="org.apache.hadoop.fs">
+ <!-- start class org.apache.hadoop.fs.BlockLocation -->
+ <class name="BlockLocation" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlockLocation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with host, name, offset and length]]>
+ </doc>
+ </constructor>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hosts (hostname) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of names (hostname:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the length of the block]]>
+ </doc>
+ </method>
+ <method name="setOffset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <doc>
+ <![CDATA[Set the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="setLength"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="length" type="long"/>
+ <doc>
+ <![CDATA[Set the length of block]]>
+ </doc>
+ </method>
+ <method name="setHosts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hosts" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the hosts hosting this block]]>
+ </doc>
+ </method>
+ <method name="setNames"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the names (host:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement write of Writable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement readFields of Writable]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BlockLocation -->
+ <!-- start class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <class name="BufferedFSInputStream" extends="java.io.BufferedInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="BufferedFSInputStream" type="org.apache.hadoop.fs.FSInputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a <code>BufferedFSInputStream</code>
+ with the specified buffer size,
+ and saves its argument, the input stream
+ <code>in</code>, for later use. An internal
+ buffer array of length <code>size</code>
+ is created and stored in <code>buf</code>.
+
+ @param in the underlying input stream.
+ @param size the buffer size.
+ @exception IllegalArgumentException if size <= 0.]]>
+ </doc>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A class that optimizes reading from FSInputStream by buffering]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <!-- start class org.apache.hadoop.fs.ChecksumException -->
+ <class name="ChecksumException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumException" type="java.lang.String, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Thrown for checksum errors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumException -->
+ <!-- start class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <class name="ChecksumFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getApproxChkSumLength" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getRawFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the raw file system]]>
+ </doc>
+ </method>
+ <method name="getChecksumFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return the name of the checksum file associated with a file.]]>
+ </doc>
+ </method>
+ <method name="isChecksumFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return true iff file is a checksum file name.]]>
+ </doc>
+ </method>
+ <method name="getChecksumFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileSize" type="long"/>
+ <doc>
+ <![CDATA[Return the length of the checksum file given the size of the
+ actual file.]]>
+ </doc>
+ </method>
+ <method name="getBytesPerSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the bytes Per Checksum]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getChecksumLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ <param name="bytesPerSum" type="int"/>
+ <doc>
+ <![CDATA[Calculates the length of the checksum file in bytes.
+ @param size the length of the data file in bytes
+ @param bytesPerSum the number of bytes in a checksum block
+ @return the number of bytes in the checksum file]]>
+ </doc>
+ </method>
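+ <!-- Illustrative worked example, stated as an assumption rather than read
+      from this file: the checksum file holds a small fixed header followed
+      by one 4-byte CRC per bytesPerSum-byte chunk of data. For
+      size = 1000 and bytesPerSum = 512 that gives ceil(1000/512) = 2
+      chunks, so getChecksumLength returns header length + 2 * 4 bytes.
+ -->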
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename files/dirs]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement the delete(Path, boolean) in checksum
+ file system.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="copyCrc" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ If src and dst are directories, the copyCrc parameter
+ determines whether to copy CRC files.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Report a checksum error to the file system.
+ @param f the file name containing the error
+ @param in the stream open on the file
+ @param inPos the position of the beginning of the bad data in the file
+ @param sums the stream open on the checksum file
+ @param sumsPos the position of the beginning of the bad data in the checksum file
+ @return true if retry is necessary]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract checksummed FileSystem.
+ It provides a basic implementation of a checksummed FileSystem,
+ which creates a checksum file for each raw file.
+ It generates and verifies checksums at the client side.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <!-- start class org.apache.hadoop.fs.ContentSummary -->
+ <class name="ContentSummary" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ContentSummary"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long, long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the length]]>
+ </doc>
+ </method>
+ <method name="getDirectoryCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the directory count]]>
+ </doc>
+ </method>
+ <method name="getFileCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the file count]]>
+ </doc>
+ </method>
+ <method name="getQuota" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the directory quota]]>
+ </doc>
+ </method>
+ <method name="getSpaceConsumed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the (disk) space consumed]]>
+ </doc>
+ </method>
+ <method name="getSpaceQuota" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns (disk) space quota]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the header of the output.
+      If qOption is false, output the directory count, file count, and content size;
+      if qOption is true, output the quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the header of the output]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the string representation of the object in the output format.
+      If qOption is false, output the directory count, file count, and content size;
+      if qOption is true, output the quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the string representation of the object]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Stores the summary of the content of a directory or a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ContentSummary -->
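+  <!-- Illustrative usage sketch (hand-written, not JDiff output): obtaining and
+       printing a ContentSummary with the methods above; the path "/user/data"
+       is a placeholder assumption.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.ContentSummary;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class ContentSummaryExample {
+         public static void main(String[] args) throws Exception {
+           FileSystem fs = FileSystem.get(new Configuration());
+           ContentSummary cs = fs.getContentSummary(new Path("/user/data"));
+           // Header plus one row, with quota columns included (qOption = true).
+           System.out.println(ContentSummary.getHeader(true) + cs.toString(true));
+         }
+       }
+  -->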
+ <!-- start class org.apache.hadoop.fs.DF -->
+ <class name="DF" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DF" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="DF" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFilesystem" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAvailable" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPercentUsed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMount" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DF_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'df' program.
+ Tested on Linux, FreeBSD, Cygwin.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DF -->
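+  <!-- Illustrative usage sketch (hand-written, not JDiff output): reading the
+       statistics DF exposes above. "/tmp" is a placeholder directory and the
+       numbers come from the platform's 'df' command.
+
+       import java.io.File;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.DF;
+
+       public class DFExample {
+         public static void main(String[] args) throws Exception {
+           DF df = new DF(new File("/tmp"), new Configuration());
+           System.out.println("filesystem: " + df.getFilesystem());
+           System.out.println("mount:      " + df.getMount());
+           System.out.println("capacity:   " + df.getCapacity());   // bytes
+           System.out.println("used:       " + df.getUsed());       // bytes
+           System.out.println("available:  " + df.getAvailable());  // bytes
+           System.out.println("percent:    " + df.getPercentUsed() + "%");
+         }
+       }
+  -->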
+ <!-- start class org.apache.hadoop.fs.DU -->
+ <class name="DU" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DU" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param interval refresh the disk usage at this interval
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <constructor name="DU" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param conf configuration object
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <method name="decDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Decrease how much disk space we use.
+ @param value decrease by this value]]>
+ </doc>
+ </method>
+ <method name="incDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Increase how much disk space we use.
+ @param value increase by this value]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return disk space used
+ @throws IOException if the shell command fails]]>
+ </doc>
+ </method>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[@return the path whose disk usage we are tracking]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Start the disk usage checking thread.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down the refreshing thread.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+      <![CDATA[Filesystem disk space usage statistics. Uses the unix 'du' program.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DU -->
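+  <!-- Illustrative usage sketch (hand-written, not JDiff output): the DU
+       lifecycle documented above. "/tmp" and the 30-second interval are
+       placeholder assumptions.
+
+       import java.io.File;
+       import org.apache.hadoop.fs.DU;
+
+       public class DUExample {
+         public static void main(String[] args) throws Exception {
+           // Refresh disk usage every 30 seconds via the (File, long) constructor.
+           DU du = new DU(new File("/tmp"), 30000L);
+           du.start();                      // start the background refresh thread
+           System.out.println(du.getDirPath() + " uses " + du.getUsed() + " bytes");
+           du.shutdown();                   // stop the refreshing thread
+         }
+       }
+  -->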
+ <!-- start class org.apache.hadoop.fs.FileChecksum -->
+ <class name="FileChecksum" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FileChecksum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAlgorithmName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The checksum algorithm name]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The length of the checksum in bytes]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value of the checksum in bytes]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true if both the algorithms and the values are the same.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An abstract class representing file checksums for files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileChecksum -->
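+  <!-- Illustrative usage sketch (hand-written, not JDiff output): comparing two
+       FileChecksum values per the equals contract above. It assumes the
+       FileSystem#getFileChecksum(Path) accessor, which is not listed in this
+       excerpt, and filesystems without checksum support may return null; the
+       paths are placeholders.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileChecksum;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class ChecksumCompare {
+         public static void main(String[] args) throws Exception {
+           FileSystem fs = FileSystem.get(new Configuration());
+           FileChecksum a = fs.getFileChecksum(new Path("/data/a"));  // placeholder
+           FileChecksum b = fs.getFileChecksum(new Path("/data/b"));  // placeholder
+           // equals is true only if both the algorithms and the values match.
+           if (a != null && b != null && a.equals(b)) {
+             System.out.println("same " + a.getAlgorithmName()
+                 + " checksum, " + a.getLength() + " bytes");
+           }
+         }
+       }
+  -->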
+ <!-- start class org.apache.hadoop.fs.FileStatus -->
+ <class name="FileStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <constructor name="FileStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a directory?
+ @return true if this is a directory]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the block size of the file.
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replication factor of a file.
+ @return the replication factor of a file.]]>
+ </doc>
+ </method>
+ <method name="getModificationTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the modification time of the file.
+ @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getAccessTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the access time of the file.
+ @return the access time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get FsPermission associated with the file.
+      @return permission. If a filesystem does not have a notion of permissions
+      or if permissions could not be determined, then the default
+      permission equivalent of "rwxrwxrwx" is returned.]]>
+ </doc>
+ </method>
+ <method name="getOwner" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the owner of the file.
+ @return owner of the file. The string could be empty if there is no
+ notion of owner of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getGroup" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the group associated with the file.
+ @return group for the file. The string could be empty if there is no
+ notion of group of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Sets permission.
+ @param permission if permission is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="owner" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets owner.
+ @param owner if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setGroup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets group.
+ @param group if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Compare this object to another object.
+
+ @param o the object to be compared.
+ @return a negative integer, zero, or a positive integer as this object
+ is less than, equal to, or greater than the specified object.
+
+      @throws ClassCastException if the specified object is not of
+      type FileStatus]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Compare if this object is equal to another object.
+      @param o the object to be compared.
+      @return true if the two file statuses have the same path name; false if not.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for the object, which is defined as
+ the hash code of the path name.
+
+ @return a hash code value for the path name.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Represents the client-side information for a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileStatus -->
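+  <!-- Illustrative usage sketch (hand-written, not JDiff output): reading the
+       FileStatus fields above for a directory listing. "/user" is a
+       placeholder path, and listStatus is the FileSystem method documented
+       later in this file.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileStatus;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class ListExample {
+         public static void main(String[] args) throws Exception {
+           FileSystem fs = FileSystem.get(new Configuration());
+           for (FileStatus s : fs.listStatus(new Path("/user"))) {
+             System.out.println((s.isDir() ? "d " : "f ") + s.getPath()
+                 + " " + s.getLen() + " " + s.getPermission()
+                 + " " + s.getOwner() + ":" + s.getGroup());
+           }
+         }
+       }
+  -->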
+ <!-- start class org.apache.hadoop.fs.FileSystem -->
+ <class name="FileSystem" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="FileSystem"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseArgs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Consider using {@link GenericOptionsParser} instead.">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="i" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Parse the command-line args, starting at i, removing consumed args
+      from the array. We expect a parameter of the form:
+ '-local | -dfs <namenode:port>'
+ @deprecated Consider using {@link GenericOptionsParser} instead.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the configured filesystem implementation.]]>
+ </doc>
+ </method>
+ <method name="getDefaultUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default filesystem URI from a configuration.
+ @param conf the configuration to access
+ @return the uri of the default filesystem]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.net.URI"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="getNamed" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="call #get(URI,Configuration) instead.">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated call #get(URI,Configuration) instead.]]>
+ </doc>
+ </method>
+ <method name="getLocal" return="org.apache.hadoop.fs.LocalFileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Get the local file system.
+ @param conf the configuration to configure the file system with
+ @return a LocalFileSystem]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the FileSystem for this URI's scheme and authority. The scheme
+ of the URI determines a configuration property name,
+ <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
+ The entire URI is passed to the FileSystem instance's initialize method.]]>
+ </doc>
+ </method>
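+  <!-- Illustrative usage sketch (hand-written, not JDiff output): obtaining a
+       FileSystem by URI scheme versus the configured default, per the two
+       get() overloads above. The namenode host and port are placeholders.
+
+       import java.net.URI;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileSystem;
+
+       public class GetExample {
+         public static void main(String[] args) throws Exception {
+           Configuration conf = new Configuration();
+           // The URI scheme selects the FileSystem implementation class via the
+           // fs.<scheme> configuration property described above.
+           FileSystem hdfs = FileSystem.get(URI.create("hdfs://namenode:9000/"), conf);
+           // No URI: falls back to the default filesystem URI (see getDefaultUri).
+           FileSystem dflt = FileSystem.get(conf);
+           System.out.println(hdfs.getUri() + " / " + dflt.getUri());
+         }
+       }
+  -->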
+ <method name="closeAll"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all cached filesystems. Be sure those filesystems are not
+ used anymore.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Create a file with the provided permission.
+      The permission of the file is set to be the provided permission, as in
+      setPermission, not permission&~umask.
+
+ It is implemented using two RPCs. It is understood that it is inefficient,
+ but the implementation is thread-safe. The other option is to change the
+ value of umask in configuration to be 0, but it is not thread-safe.
+
+ @param fs file system handle
+ @param file the name of the file to be created
+ @param permission the permission of the file
+ @return an output stream
+ @throws IOException]]>
+ </doc>
+ </method>
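+  <!-- Illustrative usage sketch (hand-written, not JDiff output): the static
+       two-RPC create helper above, which applies the permission exactly
+       rather than permission&~umask. The path and mode 0644 are placeholders.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FSDataOutputStream;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.fs.permission.FsPermission;
+
+       public class CreateWithPermission {
+         public static void main(String[] args) throws Exception {
+           FileSystem fs = FileSystem.get(new Configuration());
+           Path file = new Path("/tmp/report.txt");   // placeholder path
+           // Mode 0644 is applied regardless of the configured umask.
+           FSDataOutputStream out =
+               FileSystem.create(fs, file, new FsPermission((short) 0644));
+           out.writeBytes("done");
+           out.close();
+         }
+       }
+  -->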
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Create a directory with the provided permission.
+      The permission of the directory is set to be the provided permission, as in
+      setPermission, not permission&~umask.
+
+ @see #create(FileSystem, Path, FsPermission)
+
+ @param fs file system handle
+ @param dir the name of the directory to be created
+ @param permission the permission of the directory
+ @return true if the directory creation succeeds; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing hostnames, offset and size of
+      portions of the given file. For a nonexistent
+      file or region, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+      The base FileSystem will simply return an element containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file to open]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+      @param permission the permission of the file
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+      @param blockSize the block size of the file
+      @param progress for reporting progress if it is not null
+ @throws IOException
+ @see #setPermission(Path, FsPermission)]]>
+ </doc>
+ </method>
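+  <!-- Illustrative usage sketch (hand-written, not JDiff output): the fully
+       parameterized create overload above. The path and the chosen buffer
+       size, replication, and block size are placeholder assumptions.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FSDataOutputStream;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.fs.permission.FsPermission;
+       import org.apache.hadoop.util.Progressable;
+
+       public class FullCreate {
+         public static void main(String[] args) throws Exception {
+           FileSystem fs = FileSystem.get(new Configuration());
+           FSDataOutputStream out = fs.create(
+               new Path("/tmp/big.dat"),        // placeholder path
+               FsPermission.getDefault(),       // permission
+               true,                            // overwrite
+               4096,                            // bufferSize
+               (short) 3,                       // replication
+               64L * 1024 * 1024,               // blockSize: 64 MB
+               new Progressable() {             // write-progress callback
+                 public void progress() { System.out.print("."); }
+               });
+           out.close();
+         }
+       }
+  -->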
+ <method name="createNewFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the given Path as a brand-new zero-length file. If
+      the create fails, or if the file already existed, returns false.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, getConf().getInt("io.file.buffer.size", 4096), null)
+ @param f the existing file to be appended.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, bufferSize, null).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @param progress for reporting progress if it is not null.
+ @throws IOException]]>
+ </doc>
+ </method>
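+  <!-- Illustrative usage sketch (hand-written, not JDiff output): append is an
+       optional operation per the docs above, so this sketch assumes the
+       underlying filesystem may reject it with an IOException; the path is a
+       placeholder.
+
+       import java.io.IOException;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FSDataOutputStream;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class AppendExample {
+         public static void main(String[] args) throws Exception {
+           FileSystem fs = FileSystem.get(new Configuration());
+           try {
+             FSDataOutputStream out = fs.append(new Path("/tmp/log.txt"));
+             out.writeBytes("another line\n");
+             out.close();
+           } catch (IOException e) {
+             System.err.println("append not supported: " + e.getMessage());
+           }
+         }
+       }
+  -->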
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get replication.
+
+ @deprecated Use getFileStatus() instead
+ @param src file name
+ @return file replication
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file.
+
+ @param f the path to delete.
+      @param recursive if the path is a directory and recursive is set to
+      true, the directory is deleted; otherwise an exception is thrown. In
+      the case of a file, recursive can be set to either true or false.
+      @return true if the delete is successful; false otherwise.
+ @throws IOException]]>
+ </doc>
+ </method>
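+  <!-- Illustrative usage sketch (hand-written, not JDiff output): recursive
+       delete per the parameter semantics above; "/tmp/scratch" is a
+       placeholder path.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class DeleteExample {
+         public static void main(String[] args) throws Exception {
+           FileSystem fs = FileSystem.get(new Configuration());
+           // recursive = true is required for a directory; for a plain file
+           // the flag is accepted either way.
+           boolean ok = fs.delete(new Path("/tmp/scratch"), true);
+           System.out.println("deleted: " + ok);
+         }
+       }
+  -->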
+ <method name="deleteOnExit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Mark a path to be deleted when the FileSystem is closed.
+      When the JVM shuts down, all FileSystem objects will be closed
+      automatically, and the marked path will then be deleted as a
+      result of closing the FileSystem.
+
+ The path has to exist in the file system.
+
+ @param f the path to delete.
+ @return true if deleteOnExit is successful, otherwise false.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="processDeleteOnExit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete all files that were marked as delete-on-exit. This recursively
+ deletes all files in the specified paths.]]>
+ </doc>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Check if the path exists.
+ @param f source file]]>
+ </doc>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[True iff the named path is a regular file.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the {@link ContentSummary} of a given {@link Path}.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+      @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given path using the user-supplied path
+ filter.
+
+ @param f
+ a path name
+ @param filter
+ the user-supplied path filter
+ @return an array of FileStatus objects for the files under the given path
+ after applying the filter
+ @throws IOException
+ if encounter any problem while fetching the status]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Filter files/directories in the given list of paths using the default
+ path filter.
+
+ @param files
+ a list of paths
+ @return a list of statuses for the files under the given paths after
+      applying the default path filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using user-supplied
+ path filter.
+
+ @param files
+ a list of paths
+ @param filter
+ the user-supplied path filter
+ @return a list of statuses for the files under the given paths after
+ applying the filter
+ @exception IOException]]>
+ </doc>
+ </method>
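+  <!-- Illustrative usage sketch (hand-written, not JDiff output): listStatus
+       with a user-supplied PathFilter, per the overloads above. The "/output"
+       directory and the "part-" prefix are placeholder assumptions.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileStatus;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.fs.PathFilter;
+
+       public class FilteredList {
+         public static void main(String[] args) throws Exception {
+           FileSystem fs = FileSystem.get(new Configuration());
+           PathFilter partsOnly = new PathFilter() {
+             public boolean accept(Path p) {
+               return p.getName().startsWith("part-");
+             }
+           };
+           for (FileStatus s : fs.listStatus(new Path("/output"), partsOnly)) {
+             System.out.println(s.getPath());
+           }
+         }
+       }
+  -->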
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[<p>Return all the files that match pathPattern and are not checksum
+ files. Results are sorted by their names.
+
+ <p>
+ A filename pattern is composed of <i>regular</i> characters and
+ <i>special pattern matching</i> characters, which are:
+
+ <dl>
+ <dd>
+ <dl>
+ <p>
+ <dt> <tt> ? </tt>
+ <dd> Matches any single character.
+
+ <p>
+ <dt> <tt> * </tt>
+ <dd> Matches zero or more characters.
+
+ <p>
+ <dt> <tt> [<i>abc</i>] </tt>
+ <dd> Matches a single character from character set
+ <tt>{<i>a,b,c</i>}</tt>.
+
+ <p>
+ <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
+ <dd> Matches a single character from the character range
+ <tt>{<i>a...b</i>}</tt>. Note that character <tt><i>a</i></tt> must be
+ lexicographically less than or equal to character <tt><i>b</i></tt>.
+
+ <p>
+ <dt> <tt> [^<i>a</i>] </tt>
+ <dd> Matches a single character that is not from character set or range
+ <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur
+ immediately to the right of the opening bracket.
+
+ <p>
+ <dt> <tt> \<i>c</i> </tt>
+ <dd> Removes (escapes) any special meaning of character <i>c</i>.
+
+ <p>
+ <dt> <tt> {ab,cd} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt>
+
+ <p>
+ <dt> <tt> {ab,c{de,fh}} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt>
+
+ </dl>
+ </dd>
+ </dl>
+
+      @param pathPattern a regular expression specifying a path pattern
+
+ @return an array of paths that match the path pattern
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array of FileStatus objects whose path names match pathPattern
+      and are accepted by the user-supplied path filter. Results are sorted by
+ their path names.
+ Return null if pathPattern has no glob and the path does not exist.
+ Return an empty array if pathPattern has a glob and no path matches it.
+
+ @param pathPattern
+ a regular expression specifying the path pattern
+ @param filter
+ a user-supplied path filter
+ @return an array of FileStatus objects
+ @throws IOException if any I/O error occurs when fetching file status]]>
+ </doc>
+ </method>
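+  <!-- Illustrative usage sketch (hand-written, not JDiff output): globStatus
+       with the ? * [] {} pattern syntax documented above, including the
+       null-versus-empty result semantics. The pattern is a placeholder.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileStatus;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class GlobExample {
+         public static void main(String[] args) throws Exception {
+           FileSystem fs = FileSystem.get(new Configuration());
+           FileStatus[] matches =
+               fs.globStatus(new Path("/logs/2010-11-*/part-*"));
+           if (matches == null) {
+             // No glob characters matched and the path does not exist.
+             System.out.println("path does not exist");
+           } else {
+             for (FileStatus s : matches) System.out.println(s.getPath());
+           }
+         }
+       }
+  -->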
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the current user's home directory in this filesystem.
+ The default implementation returns "/user/$USER/".]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+      @param new_dir the new working directory]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the current working directory for the given file system.
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #mkdirs(Path, FsPermission)} with default permission.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make the given file and all non-existent parents into
+ directories. Has the semantics of Unix 'mkdir -p'.
+ Existence of the directory hierarchy is not an error.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+      the given dst name; the source is kept intact afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[The src files are on the local disk. Add them to the FS at
+      the given dst name, removing the sources afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name, removing the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[The src files are on the local disk. Add them to the FS at
+      the given dst name.
+      delSrc indicates if the sources should be removed
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="moveToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+      Remove the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[No more filesystem operations are needed. Will
+ release any held locks.]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total size of all files in the filesystem.]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should
+ optimally be split into to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a file status object that represents the path.
+ @param f The path we want information from
+ @return a FileStatus object
+ @throws FileNotFoundException when the path does not exist;
+ IOException see specific implementation]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the checksum of a file.
+
+ @param f The file path
+ @return The file checksum. The default return value is null,
+ which indicates that no checksum algorithm is implemented
+ in the corresponding FileSystem.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permission of a path.
+ @param p the path
+ @param permission the permission to set]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param p The path
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the access time and modification time of a file
+ @param p The path
+ @param mtime Set the modification time of this file.
+ The number of milliseconds since Jan 1, 1970.
+ A value of -1 means that this call should not set modification time.
+ @param atime Set the access time of this file.
+ The number of milliseconds since Jan 1, 1970.
+ A value of -1 means that this call should not set access time.]]>
+ </doc>
+ </method>
+ <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.FileSystem&gt;"/>
+ <doc>
+ <![CDATA[Get the statistics for a particular file system
+ @param cls the class to look up
+ @return a statistics object]]>
+ </doc>
+ </method>
+ <method name="printStatistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="statistics" type="org.apache.hadoop.fs.FileSystem.Statistics"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The statistics for this file system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An abstract base class for a fairly generic filesystem. It
+ may be implemented as a distributed filesystem, or as a "local"
+ one that reflects the locally-connected disk. The local version
+ exists for small Hadoop instances and for testing.
+
+ <p>
+
+ All user code that may potentially use the Hadoop Distributed
+ File System should be written to use a FileSystem object. The
+ Hadoop DFS is a multi-machine system that appears as a single
+ disk. It's useful because of its fault tolerance and potentially
+ very large capacity.
+
+ <p>
+ The local implementation is {@link LocalFileSystem} and the distributed
+ implementation is DistributedFileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem -->
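+ <!-- A minimal usage sketch of the FileSystem methods documented above.
+      The paths and class name are hypothetical, and FileSystem.get(conf)
+      is assumed to resolve the default filesystem from the configuration:
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class FileSystemSketch {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.get(conf);
+
+          // Upload a local file, keeping the local copy (delSrc = false).
+          fs.copyFromLocalFile(false, new Path("/tmp/local.txt"),
+                               new Path("/user/demo/remote.txt"));
+
+          // Write via a local temp file; completeLocalOutput promotes it to
+          // the final target (a no-op when the filesystem is already local).
+          Path target = new Path("/user/demo/out.txt");
+          Path tmp = new Path("/tmp/out.txt.tmp");
+          Path local = fs.startLocalOutput(target, tmp);
+          // ... write to 'local' with ordinary java.io APIs, then:
+          fs.completeLocalOutput(target, tmp);
+        }
+      }
+ -->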
+ <!-- start class org.apache.hadoop.fs.FileSystem.Statistics -->
+ <class name="FileSystem.Statistics" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="incrementBytesRead"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes read in the statistics
+ @param newBytes the additional bytes read]]>
+ </doc>
+ </method>
+ <method name="incrementBytesWritten"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes written in the statistics
+ @param newBytes the additional bytes written]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes read
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes written
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem.Statistics -->
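+ <!-- A sketch of reading per-filesystem statistics, assumed to run inside
+      a method that may throw IOException; fs.getClass() is used as the
+      lookup class matching the getStatistics(Class) signature above:
+
+      FileSystem fs = FileSystem.get(new Configuration());
+      FileSystem.Statistics stats = FileSystem.getStatistics(fs.getClass());
+      long before = stats.getBytesRead();
+      // ... perform some reads through 'fs' ...
+      System.out.println("bytes read: " + (stats.getBytesRead() - before));
+ -->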
+ <!-- start class org.apache.hadoop.fs.FileUtil -->
+ <class name="FileUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus to an array of Path
+
+ @param stats
+ an array of FileStatus objects
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus to an array of Path.
+ If stats is null, return path
+ @param stats
+ an array of FileStatus objects
+ @param path
+ default path to return if stats is null
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="fullyDelete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a directory and all its contents. If
+ we return false, the directory may be partially deleted.]]>
+ </doc>
+ </method>
+ <method name="fullyDelete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link FileSystem#delete(Path, boolean)}">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recursively delete a directory.
+
+ @param fs {@link FileSystem} on which the path is present
+ @param dir directory to recursively delete
+ @throws IOException
+ @deprecated Use {@link FileSystem#delete(Path, boolean)}]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copyMerge" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dstFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="addString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy all files in a directory to one output file (merge).]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy local files to a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="java.io.File"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy FileSystem files to local files.]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param filename The filename to convert
+ @return The Unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @return The Unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <param name="makeCanonicalPath" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @param makeCanonicalPath
+ Whether to make a canonical path for the file passed
+ @return The Unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="getDU" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Takes an input dir and returns the disk usage (du) of that local
+ directory. Very basic implementation.
+
+ @param dir
+ The local input dir whose disk space is to be measured
+ @return The total disk space of the input local directory]]>
+ </doc>
+ </method>
+ <method name="unZip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="unzipDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a zip file as input, unzips it into the unzip directory
+ passed as the second parameter.
+ @param inFile The zip file as input
+ @param unzipDir The directory into which to unzip the zip file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unTar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="untarDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a tar file as input, untars it into the untar directory
+ passed as the second parameter.
+
+ This utility will untar ".tar" files and ".tar.gz"/".tgz" files.
+
+ @param inFile The tar file as input.
+ @param untarDir The directory into which to untar the tar file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="symLink" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="linkname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a soft link between a src and destination
+ only on a local disk. HDFS does not support this.
+ @param target the target for symlink
+ @param linkname the symlink
+ @return value returned by the command]]>
+ </doc>
+ </method>
+ <method name="chmod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="perm" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Change the permissions on a filename.
+ @param filename the name of the file to change
+ @param perm the permission string
+ @return the exit code from the command
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="createLocalTempFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="basefile" type="java.io.File"/>
+ <param name="prefix" type="java.lang.String"/>
+ <param name="isDeleteOnExit" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a tmp file for a base file.
+ @param basefile the base file of the tmp
+ @param prefix file name prefix of tmp
+ @param isDeleteOnExit if true, the tmp will be deleted when the VM exits
+ @return a newly created tmp file
+ @exception IOException If a tmp file cannot be created
+ @see java.io.File#createTempFile(String, String, File)
+ @see java.io.File#deleteOnExit()]]>
+ </doc>
+ </method>
+ <method name="replaceFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="target" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move the src file to the name specified by target.
+ @param src the source file
+ @param target the target file
+ @exception IOException If this operation fails]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of file-processing utility methods]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil -->
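+ <!-- A sketch of the FileUtil helpers above, assumed to run inside a
+      method that may throw IOException; all paths are hypothetical:
+
+      Configuration conf = new Configuration();
+      FileSystem fs = FileSystem.get(conf);
+
+      // Copy within (or between) filesystems, keeping the source.
+      FileUtil.copy(fs, new Path("/user/demo/a.txt"),
+                    fs, new Path("/user/demo/b.txt"),
+                    false /* deleteSource */, conf);
+
+      // Turn a directory listing into plain paths.
+      Path[] paths = FileUtil.stat2Paths(fs.listStatus(new Path("/user/demo")));
+
+      // Recursively delete a local directory (may be partial on failure).
+      boolean ok = FileUtil.fullyDelete(new java.io.File("/tmp/scratch"));
+ -->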
+ <!-- start class org.apache.hadoop.fs.FileUtil.HardLink -->
+ <class name="FileUtil.HardLink" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil.HardLink"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createHardLink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.io.File"/>
+ <param name="linkName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a hardlink]]>
+ </doc>
+ </method>
+ <method name="getLinkCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Retrieves the number of links to the specified file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Class for creating hardlinks.
+ Supports Unix, Cygwin, and Windows XP.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil.HardLink -->
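+ <!-- A sketch of FileUtil.HardLink on local files (hypothetical paths;
+      assumes /tmp/data.bin exists and has no other links):
+
+      java.io.File target = new java.io.File("/tmp/data.bin");
+      java.io.File link = new java.io.File("/tmp/data.link");
+      FileUtil.HardLink.createHardLink(target, link);
+      int n = FileUtil.HardLink.getLinkCount(target); // 2 after linking
+ -->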
+ <!-- start class org.apache.hadoop.fs.FilterFileSystem -->
+ <class name="FilterFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FilterFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List files in a directory.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param newDir the new working directory]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should
+ optimally be split into to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get file status.]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A <code>FilterFileSystem</code> contains
+ some other file system, which it uses as
+ its basic file system, possibly transforming
+ the data along the way or providing additional
+ functionality. The class <code>FilterFileSystem</code>
+ itself simply overrides all methods of
+ <code>FileSystem</code> with versions that
+ pass all requests to the contained file
+ system. Subclasses of <code>FilterFileSystem</code>
+ may further override some of these methods
+ and may also provide additional methods
+ and fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FilterFileSystem -->
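+ <!-- A sketch of extending FilterFileSystem to intercept one operation
+      while delegating everything else to the wrapped filesystem (the
+      class name is hypothetical):
+
+      import java.io.IOException;
+      import org.apache.hadoop.fs.FSDataInputStream;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.FilterFileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class LoggingFileSystem extends FilterFileSystem {
+        public LoggingFileSystem(FileSystem fs) { super(fs); }
+
+        @Override
+        public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+          System.err.println("open: " + f);
+          return super.open(f, bufferSize); // delegate to the contained fs
+        }
+      }
+ -->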
+ <!-- start class org.apache.hadoop.fs.FSDataInputStream -->
+ <class name="FSDataInputStream" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSDataInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="desired" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
+ and buffers input through a {@link BufferedInputStream}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataInputStream -->
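+ <!-- A sketch of seeking and positioned reads on FSDataInputStream,
+      assumed to run inside a method that may throw IOException; the
+      path is hypothetical, and positioned reads are assumed not to
+      move the stream's own offset (the PositionedReadable contract):
+
+      FileSystem fs = FileSystem.get(new Configuration());
+      FSDataInputStream in = fs.open(new Path("/user/demo/remote.txt"), 4096);
+      byte[] header = new byte[16];
+      in.readFully(0L, header);  // read bytes 0..15 regardless of position
+      in.seek(128L);             // subsequent read() continues at offset 128
+      long where = in.getPos();  // 128
+      in.close();
+ -->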
+ <!-- start class org.apache.hadoop.fs.FSDataOutputStream -->
+ <class name="FSDataOutputStream" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Syncable"/>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWrappedStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps an {@link OutputStream} in a {@link DataOutputStream},
+ buffers output through a {@link BufferedOutputStream} and creates a checksum
+ file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataOutputStream -->
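+ <!-- A sketch of writing through FSDataOutputStream, assumed to run
+      inside a method that may throw IOException; the path is
+      hypothetical and fs.create(Path) is assumed available alongside
+      the fuller create(...) variants documented above:
+
+      FileSystem fs = FileSystem.get(new Configuration());
+      FSDataOutputStream out = fs.create(new Path("/user/demo/out.txt"));
+      out.writeBytes("hello\n");  // inherited from DataOutputStream
+      long pos = out.getPos();    // bytes written so far
+      out.sync();                 // flush buffered data (Syncable)
+      out.close();
+ -->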
+ <!-- start class org.apache.hadoop.fs.FSError -->
+ <class name="FSError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Thrown for unexpected filesystem errors, presumed to reflect disk errors
+ in the native filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSError -->
+ <!-- start class org.apache.hadoop.fs.FSInputChecker -->
+ <class name="FSInputChecker" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs]]>
+ </doc>
+ </constructor>
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int, boolean, java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs
+ @param sum the type of Checksum engine
+ @param chunkSize maximum chunk size
+ @param checksumSize the number of bytes of each checksum]]>
+ </doc>
+ </constructor>
+ <method name="readChunk" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads in the next checksum chunk of data into <code>buf</code> at <code>offset</code>
+ and its checksum into <code>checksum</code>.
+ The method is used for implementing read; therefore, it should be optimized
+ for sequential reading.
+ @param pos chunkPos
+ @param buf destination buffer
+ @param offset offset in buf at which to store data
+ @param len maximum number of bytes to read
+ @return number of bytes read]]>
+ </doc>
+ </method>
+ <method name="getChunkPosition" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <doc>
+ <![CDATA[Return position of beginning of chunk containing pos.
+
+ @param pos a position in the file
+ @return the starting position of the chunk which contains the byte]]>
+ </doc>
+ </method>
+ <method name="needChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if there is a need for checksum verification]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read one checksum-verified byte
+
+ @return the next byte of data, or <code>-1</code> if the end of the
+ stream is reached.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read checksum verified bytes from this byte-input stream into
+ the specified byte array, starting at the given offset.
+
+ <p> This method implements the general contract of the corresponding
+ <code>{@link InputStream#read(byte[], int, int) read}</code> method of
+ the <code>{@link InputStream}</code> class. As an additional
+ convenience, it attempts to read as many bytes as possible by repeatedly
+ invoking the <code>read</code> method of the underlying stream. This
+ iterated <code>read</code> continues until one of the following
+ conditions becomes true: <ul>
+
+ <li> The specified number of bytes have been read,
+
+ <li> The <code>read</code> method of the underlying stream returns
+ <code>-1</code>, indicating end-of-file.
+
+ </ul> If the first <code>read</code> on the underlying stream returns
+ <code>-1</code> to indicate end-of-file then this method returns
+ <code>-1</code>. Otherwise this method returns the number of bytes
+ actually read.
+
+ @param b destination buffer.
+ @param off offset at which to start storing bytes.
+ @param len maximum number of bytes to read.
+ @return the number of bytes read, or <code>-1</code> if the end of
+ the stream has been reached.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if any checksum error occurs]]>
+ </doc>
+ </method>
+ <method name="checksum2long" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="checksum" type="byte[]"/>
+ <doc>
+ <![CDATA[Convert a checksum byte array to a long]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over and discards <code>n</code> bytes of data from the
+ input stream.
+
+ <p>This method may skip more bytes than are remaining in the backing
+ file. This produces no exception and the number of bytes skipped
+ may include some number of bytes that were beyond the EOF of the
+ backing file. Attempting to read from the stream after skipping past
+ the end will result in -1 indicating the end of the file.
+
+ <p>If <code>n</code> is negative, no bytes are skipped.
+
+ @param n the number of bytes to be skipped.
+ @return the actual number of bytes skipped.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to skip to is corrupted]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given position in the stream.
+ The next read() will be from that position.
+
+ <p>This method may seek past the end of the file.
+ This produces no exception and an attempt to read from
+ the stream will result in -1 indicating the end of the file.
+
+ @param pos the position to seek to.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to seek to is corrupted]]>
+ </doc>
+ </method>
+ <method name="readFully" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="stm" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A utility function that tries to read up to <code>len</code> bytes from
+ <code>stm</code>.
+
+ @param stm an input stream
+ @param buf destination buffer
+ @param offset offset at which to store data
+ @param len number of bytes to read
+ @return actual number of bytes read
+ @throws IOException if there is any IO error]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="maxChunkSize" type="int"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Set the checksum related parameters
+ @param sum which type of checksum to use
+ @param maxChunkSize maximum chunk size
+ @param checksumSize checksum size]]>
+ </doc>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="readlimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="file" type="org.apache.hadoop.fs.Path"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file name from which data is read]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This is a generic input stream for verifying checksums for
+ data before it is read by a user.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputChecker -->
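+ <!-- A sketch of FSInputChecker.checksum2long, which is assumed to fold
+      a checksum byte array into a long using big-endian packing (the
+      sample bytes are illustrative only):
+
+      byte[] crc = new byte[] { 0, 0, 0, 42 };
+      long value = FSInputChecker.checksum2long(crc); // 42L, assuming big-endian
+ -->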
+ <!-- start class org.apache.hadoop.fs.FSInputStream -->
+ <class name="FSInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSInputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="seek"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks a different copy of the data. Returns true if
+ found a new source, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[FSInputStream is a generic InputStream with a little bit
+ of RandomAccessFile-style seek ability.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputStream -->
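+ <!-- A minimal FSInputStream subclass over an in-memory buffer, showing
+      the three abstract methods above (the class name is hypothetical;
+      this is a single-copy source, so seekToNewSource always fails):
+
+      import java.io.IOException;
+      import org.apache.hadoop.fs.FSInputStream;
+
+      public class ByteArrayFSInputStream extends FSInputStream {
+        private final byte[] data;
+        private int pos;
+
+        public ByteArrayFSInputStream(byte[] data) { this.data = data; }
+
+        @Override
+        public int read() {
+          return pos < data.length ? (data[pos++] & 0xff) : -1;
+        }
+
+        @Override
+        public void seek(long p) throws IOException {
+          if (p < 0 || p > data.length) throw new IOException("bad seek: " + p);
+          pos = (int) p; // the next read() starts here
+        }
+
+        @Override
+        public long getPos() { return pos; }
+
+        @Override
+        public boolean seekToNewSource(long targetPos) { return false; }
+      }
+ -->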
+ <!-- start class org.apache.hadoop.fs.FSOutputSummer -->
+ <class name="FSOutputSummer" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSOutputSummer" type="java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="writeChunk"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write one byte]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes <code>len</code> bytes from the specified byte array
+ starting at offset <code>off</code> and generates a checksum for
+ each data chunk.
+
+ <p> This method stores bytes from the given array into this
+ stream's buffer before the buffer is checksummed. The buffer is
+ checksummed and flushed to the underlying output stream when all data
+ in a checksum chunk is in the buffer. If the buffer is empty and the
+ requested length is at least as large as the next checksum chunk,
+ this method will checksum and write the chunk directly
+ to the underlying output stream, avoiding an unnecessary data copy.
+
+ @param b the data.
+ @param off the start offset in the data.
+ @param len the number of bytes to write.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="convertToByteStream" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Converts a checksum integer value to a byte stream]]>
+ </doc>
+ </method>
+ <method name="resetChecksumChunk"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Replaces the existing buffer with a new one of the specified size.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is a generic output stream for generating checksums for
+ data before it is written to the underlying stream]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSOutputSummer -->
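+ <!-- Usage sketch (illustrative): a subclass supplies writeChunk(); the base
+      class buffers write() calls and emits one checksum per chunk as described
+      above. The 512-byte chunk size, 4-byte CRC32 checksum, and class name are
+      assumptions for this sketch.
+
+        class ChunkSink extends FSOutputSummer {
+          ChunkSink() { super(new java.util.zip.CRC32(), 512, 4); }
+          protected void writeChunk(byte[] b, int off, int len, byte[] checksum)
+              throws java.io.IOException {
+            // deliver one data chunk plus its 4-byte checksum to the real sink
+          }
+        }
+ -->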
+ <!-- start class org.apache.hadoop.fs.FsShell -->
+ <class name="FsShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="FsShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current trash directory associated with this shell.]]>
+ </doc>
+ </method>
+ <method name="byteDesc" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="long"/>
+ <doc>
+ <![CDATA[Return an abbreviated, human-readable description of the byte length]]>
+ </doc>
+ </method>
+ <method name="limitDecimalTo2" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Run the shell command given in argv.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() is the command-line entry point; it also has some simple utility methods]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dateForm" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="modifFmt" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provide command line access to a FileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsShell -->
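+ <!-- Usage sketch (illustrative): FsShell implements Tool, so it is normally
+      driven through ToolRunner; the argument list is hypothetical.
+
+        FsShell shell = new FsShell();
+        int rc = ToolRunner.run(shell, new String[] {"-ls", "/user"});
+        shell.close();
+        System.exit(rc);
+ -->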
+ <!-- start class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
+ <class name="FsUrlStreamHandlerFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.net.URLStreamHandlerFactory"/>
+ <constructor name="FsUrlStreamHandlerFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsUrlStreamHandlerFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createURLStreamHandler" return="java.net.URLStreamHandler"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Factory for URL stream handlers.
+
+ There is only one handler, whose job is to create URLConnections. An
+ FsUrlConnection relies on FileSystem to choose the appropriate FS
+ implementation.
+
+ Before returning our handler, we make sure that FileSystem knows an
+ implementation for the requested scheme/protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
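+ <!-- Usage sketch (illustrative): once the factory is installed, java.net.URL
+      can open FileSystem-backed schemes. The JVM accepts a stream handler
+      factory only once per process; the hdfs URL below is hypothetical.
+
+        URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
+        InputStream in =
+            new URL("hdfs://namenode:8020/data/sample.txt").openStream();
+ -->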
+ <!-- start class org.apache.hadoop.fs.HarFileSystem -->
+ <class name="HarFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HarFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Public default constructor for HarFileSystem.]]>
+ </doc>
+ </constructor>
+ <constructor name="HarFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor to create a HarFileSystem with an
+ underlying filesystem.
+ @param fs the underlying filesystem]]>
+ </doc>
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize a Har filesystem per har archive. The
+ archive home directory is the top-level directory
+ in the filesystem that contains the HAR archive.
+ Be careful with this method: you do not want to keep
+ creating new FileSystem instances per call to
+ path.getFileSystem().
+ The URI of a Har path is either
+ har://underlyingfsscheme-host:port/archivepath
+ or
+ har:///archivepath; the latter form assumes the default
+ underlying filesystem when none is specified.]]>
+ </doc>
+ </method>
+ <method name="getHarVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the top-level archive.]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the URI of this filesystem.
+ The URI is of the form
+ har://underlyingfsscheme-host:port/pathintheunderlyingfs]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get block locations from the underlying fs.
+ @param file the input FileStatus whose block locations are requested
+ @param start the start offset in the file
+ @param len the length in the file
+ @return block locations for this segment of the file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getHarHash" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return the hash of the path p inside
+ the filesystem.
+ @param p the path in the harfilesystem
+ @return the hash code of the path.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the FileStatus of files in the har archive.
+ The permissions returned are those of the archive
+ index files; permissions are not persisted
+ while creating a hadoop archive.
+ @param f the path in the har filesystem
+ @return the FileStatus.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a har input stream that fakes the end of
+ the file. It reads the index files to get the part
+ file name and the size and start offset of the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[listStatus returns the children of a directory
+ after looking up the index files.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the top-level archive path.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies the file in the har filesystem to a local file.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permisssion" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <field name="VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is an implementation of the Hadoop Archive
+ Filesystem. This archive Filesystem has index files
+ of the form _index* and contents of the form
+ part-*. The index files store the indexes of the
+ real files. The index files are of the form _masterindex
+ and _index. The master index is a level of indirection
+ into the index file that makes lookups faster. The index
+ file is sorted by the hash codes of the paths it contains,
+ and the master index contains pointers to the positions in
+ the index for ranges of hash codes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.HarFileSystem -->
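+ <!-- Usage sketch (illustrative): reading through the har scheme described in
+      initialize(); the underlying scheme, host, port, and archive path are
+      hypothetical.
+
+        Path inHar = new Path("har://hdfs-namenode:8020/user/logs.har/dir");
+        FileSystem harFs = inHar.getFileSystem(new Configuration());
+        FileStatus[] children = harFs.listStatus(inHar);
+ -->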
+ <!-- start class org.apache.hadoop.fs.InMemoryFileSystem -->
+ <class name="InMemoryFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InMemoryFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InMemoryFileSystem" type="java.net.URI, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reserveSpaceWithCheckSum" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Register a file with its size. This will also register a checksum for the
+ file that the user is trying to create. This is required since none of
+ the FileSystem APIs accept the size of the file as an argument. But since
+ we must know the size of the file a priori, the user must call this method
+ for each file they want to create
+ and reserve memory for that file. We either succeed in reserving memory
+ for both the main file and the checksum file and return true, or return
+ false.]]>
+ </doc>
+ </method>
+ <method name="getFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getNumFiles" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getFSSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPercentUsed" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of the in-memory filesystem. This implementation assumes
+ that the file lengths are known ahead of time and that the total length of all
+ the files is below a certain number (like 100 MB, configurable). Use the API
+ reserveSpaceWithCheckSum(Path f, int size) (described above) to reserve
+ space in the FS. The URI of this filesystem starts with
+ ramfs://.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.InMemoryFileSystem -->
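+ <!-- Usage sketch (illustrative): space must be reserved before a file is
+      created, as reserveSpaceWithCheckSum() explains. The ramfs URI, path, and
+      size are assumptions; conf is assumed to be a Configuration in scope.
+
+        InMemoryFileSystem ramFs =
+            (InMemoryFileSystem) FileSystem.get(URI.create("ramfs://mem"), conf);
+        Path f = new Path("/tmp/block");
+        if (ramFs.reserveSpaceWithCheckSum(f, 4 * 1024 * 1024)) {
+          FSDataOutputStream out = ramFs.create(f);   // safe: space was reserved
+          out.close();
+        }
+ -->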
+ <!-- start class org.apache.hadoop.fs.LocalDirAllocator -->
+ <class name="LocalDirAllocator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalDirAllocator" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an allocator object.
+ @param contextCfgItemName the configuration item that lists the candidate directories]]>
+ </doc>
+ </constructor>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. This method should be used if the size of
+ the file is not known a priori. We go round-robin over the set of disks
+ (via the configured dirs) and return the first complete path where
+ we could create the parent directory of the passed path.
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. Pass size as -1 if not known a priori. We
+ round-robin over the set of disks (via the configured dirs) and return
+ the first complete path which has enough space.
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathToRead" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS for reading. We search through all the
+ configured dirs for the file's existence and return the complete
+ path to the file when we find one
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createTmpFileForWrite" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a temporary file in the local FS. Pass size as -1 if not known
+ a priori. We round-robin over the set of disks (via the configured dirs)
+ and select the first complete path which has enough space. A file is
+ created on this directory. The file is guaranteed to go away when the
+ JVM exits.
+ @param pathStr prefix for the temporary file
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return a unique temporary file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isContextValid" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextCfgItemName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Method to check whether a context is valid
+ @param contextCfgItemName
+ @return true/false]]>
+ </doc>
+ </method>
+ <method name="ifExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[We search through all the configured dirs for the file's existence
+ and return true when we find one.
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return true if the file exists, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of a round-robin scheme for disk allocation for creating
+ files. It keeps track of which disk was last
+ allocated for a file write. For the current request, the next disk from
+ the set of disks is allocated if the free space on the disk is
+ sufficient to accommodate the file that is being considered for
+ creation. If the space requirements cannot be met, the next disk in order
+ is tried and so on until a disk is found with sufficient capacity.
+ Once a disk with sufficient space is identified, a check is done to make
+ sure that the disk is writable. Also, there is an API provided that doesn't
+ take the space requirements into consideration but just checks whether the
+ disk under consideration is writable (this should be used for cases where
+ the file size is not known a priori). An API is provided to read a path that
+ was created earlier. That API works by doing a scan of all the disks for the
+ input pathname.
+ This implementation also provides the functionality of having multiple
+ allocators per JVM (one for each unique functionality or context, like
+ mapred, dfs-client, etc.). It ensures that there is only one instance of
+ an allocator per context per JVM.
+ Note:
+ 1. The contexts referred above are actually the configuration items defined
+ in the Configuration class like "mapred.local.dir" (for which we want to
+ control the dir allocations). The context-strings are exactly those
+ configuration items.
+ 2. This implementation does not take into consideration cases where
+ a disk becomes read-only or goes out of space while a file is being written
+ to (disks are shared between multiple processes, and so the latter situation
+ is probable).
+ 3. In the class implementation, "Disk" is referred to as "Dir", which
+ actually points to the configured directory on the Disk which will be the
+ parent for all file write/read allocations.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalDirAllocator -->
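+ <!-- Usage sketch (illustrative): the context string names a configuration item
+      listing the candidate directories, e.g. "mapred.local.dir" per note 1
+      above; the file name and size are hypothetical, and conf is assumed to be
+      a Configuration in scope.
+
+        LocalDirAllocator alloc = new LocalDirAllocator("mapred.local.dir");
+        Path spill = alloc.getLocalPathForWrite("spill0.out", 64L * 1024 * 1024, conf);
+        Path back = alloc.getLocalPathToRead("spill0.out", conf);
+ -->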
+ <!-- start class org.apache.hadoop.fs.LocalFileSystem -->
+ <class name="LocalFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocalFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRaw" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Moves files to a bad file directory on the same device, so that their
+ storage will not be reused.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the checksummed local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalFileSystem -->
+ <!-- start class org.apache.hadoop.fs.MD5MD5CRC32FileChecksum -->
+ <class name="MD5MD5CRC32FileChecksum" extends="org.apache.hadoop.fs.FileChecksum"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5MD5CRC32FileChecksum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Same as this(0, 0, null)]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5MD5CRC32FileChecksum" type="int, long, org.apache.hadoop.io.MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an MD5MD5CRC32FileChecksum]]>
+ </doc>
+ </constructor>
+ <method name="getAlgorithmName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="xml" type="org.znerd.xmlenc.XMLOutputter"/>
+ <param name="that" type="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write that object to xml output.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attrs" type="org.xml.sax.Attributes"/>
+ <exception name="SAXException" type="org.xml.sax.SAXException"/>
+ <doc>
+ <![CDATA[Return the object represented in the attributes.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[MD5 of MD5 of CRC32.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.MD5MD5CRC32FileChecksum -->
+ <!-- start class org.apache.hadoop.fs.Path -->
+ <class name="Path" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="Path" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a path from a String. Path strings are URIs, but with
+ unescaped elements and some additional normalization.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Path from components.]]>
+ </doc>
+ </constructor>
+ <method name="toUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this to a URI.]]>
+ </doc>
+ </method>
+ <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the FileSystem that owns this Path.]]>
+ </doc>
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True if the path component of this path is absolute.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the final component of this path.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the parent of a path or null if at root.]]>
+ </doc>
+ </method>
+ <method name="suffix" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a suffix to the final name in the path.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="depth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of elements in this path.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <doc>
+ <![CDATA[Returns a qualified path object.]]>
+ </doc>
+ </method>
+ <field name="SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The directory separator, a slash.]]>
+ </doc>
+ </field>
+ <field name="SEPARATOR_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CUR_DIR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Names a file or directory in a {@link FileSystem}.
+ Path strings use slash as the directory separator. A path string is
+ absolute if it begins with a slash.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Path -->
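+ <!-- Usage sketch (illustrative): resolving a child against a parent with the
+      constructors above; the names are hypothetical and conf is assumed to be
+      a Configuration in scope.
+
+        Path parent = new Path("/user/alice");
+        Path child = new Path(parent, "data.txt");    // /user/alice/data.txt
+        FileSystem owner = child.getFileSystem(conf); // FileSystem owning the path
+ -->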
+ <!-- start interface org.apache.hadoop.fs.PathFilter -->
+ <interface name="PathFilter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Tests whether or not the specified abstract pathname should be
+ included in a pathname list.
+
+ @param path The abstract pathname to be tested
+ @return <code>true</code> if and only if <code>path</code>
+ should be included]]>
+ </doc>
+ </method>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PathFilter -->
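+ <!-- Usage sketch (illustrative): a filter keeping only part files, passed to
+      FileSystem.listStatus(Path, PathFilter); the prefix and directory are
+      hypothetical, and fs is assumed to be an open FileSystem in scope.
+
+        PathFilter partsOnly = new PathFilter() {
+          public boolean accept(Path path) {
+            return path.getName().startsWith("part-");
+          }
+        };
+        FileStatus[] parts = fs.listStatus(new Path("/output"), partsOnly);
+ -->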
+ <!-- start interface org.apache.hadoop.fs.PositionedReadable -->
+ <interface name="PositionedReadable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read up to the specified number of bytes, from a given
+ position within a file, and return the number of bytes read. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the specified number of bytes, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a number of bytes equal to the length of the buffer, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits positional reading.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PositionedReadable -->
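+ <!-- Usage sketch (illustrative): positional reads leave the stream offset
+      untouched, so one open stream can serve several readers. The path and
+      offsets are hypothetical, and fs is assumed to be an open FileSystem.
+
+        FSDataInputStream in = fs.open(new Path("/data/sample.txt"));
+        byte[] head = new byte[16];
+        in.readFully(0L, head);                        // offset unchanged
+        int n = in.read(4096L, head, 0, head.length);  // also offset-preserving
+ -->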
+ <!-- start class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <class name="RawLocalFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RawLocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the specified directory hierarchy. Does not
+ treat existence as an error.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsWorkingFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chown to set owner.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chmod to set permission.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the raw local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <!-- start interface org.apache.hadoop.fs.Seekable -->
+ <interface name="Seekable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks to a different copy of the data. Returns true if
+ a new source was found, false otherwise.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits seeking.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Seekable -->
+ <!-- start interface org.apache.hadoop.fs.Syncable -->
+ <interface name="Syncable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Synchronize all buffers with the underlying devices.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface declares the sync() operation.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Syncable -->
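A minimal sketch of the sync() contract, assuming FSDataOutputStream (which implements Syncable in this API) and a hypothetical output path:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SyncDemo {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        FSDataOutputStream out = fs.create(new Path("/tmp/journal.log"));
        try {
          out.writeBytes("record-1\n");
          out.sync();           // push buffered bytes to the underlying devices
        } finally {
          out.close();
        }
      }
    }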
+ <!-- start class org.apache.hadoop.fs.Trash -->
+ <class name="Trash" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Trash" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a trash can accessor.
+ @param conf a Configuration]]>
+ </doc>
+ </constructor>
+ <method name="moveToTrash" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move a file or directory to the current trash directory.
+ @return false if the item is already in the trash or trash is disabled]]>
+ </doc>
+ </method>
+ <method name="checkpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a trash checkpoint.]]>
+ </doc>
+ </method>
+ <method name="expunge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete old checkpoints.]]>
+ </doc>
+ </method>
+ <method name="getEmptier" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a {@link Runnable} that periodically empties the trash of all
+ users, intended to be run by the superuser. Only one checkpoint is kept
+ at a time.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Run an emptier.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides a <i>trash</i> feature. Files are moved to a user's trash
+ directory, a subdirectory of their home directory named ".Trash". Files are
+ initially moved to a <i>current</i> sub-directory of the trash directory.
+ Within that sub-directory their original path is preserved. Periodically
+ one may checkpoint the current trash and remove older checkpoints. (This
+ design permits trash management without enumeration of the full trash
+ content, without date support in the filesystem, and without clock
+ synchronization.)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Trash -->
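A usage sketch for Trash, assuming a configured default filesystem; the path is hypothetical, and moveToTrash returns false when trash is disabled (trash retention is conventionally governed by fs.trash.interval).

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.Trash;

    public class TrashDemo {
      public static void main(String[] args) throws IOException {
        Trash trash = new Trash(new Configuration());
        // Hypothetical path; false if already trashed or trash is disabled.
        boolean moved = trash.moveToTrash(new Path("/user/alice/old-data"));
        System.out.println("moved to trash: " + moved);
        trash.checkpoint();   // snapshot the current trash directory
        trash.expunge();      // delete old checkpoints
      }
    }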
+</package>
+<package name="org.apache.hadoop.fs.ftp">
+ <!-- start class org.apache.hadoop.fs.ftp.FTPException -->
+ <class name="FTPException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.String, java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A class that wraps a {@link Throwable} in a RuntimeException.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPException -->
+ <!-- start class org.apache.hadoop.fs.ftp.FTPFileSystem -->
+ <class name="FTPFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A stream obtained via this call must be closed before other APIs
+ of this class are used; otherwise the invocation will block.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} backed by an FTP client provided by <a
+ href="http://commons.apache.org/net/">Apache Commons Net</a>.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPFileSystem -->
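Since FTPFileSystem is registered like any other FileSystem, it is normally reached through FileSystem.get() with an ftp:// URI rather than constructed directly. A sketch, with host, credentials, and paths as placeholders:

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FtpListDemo {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder host and credentials.
        URI uri = URI.create("ftp://user:secret@ftp.example.com/");
        FileSystem fs = FileSystem.get(uri, conf);
        for (FileStatus status : fs.listStatus(new Path("/pub"))) {
          System.out.println(status.getPath());
        }
        fs.close();
      }
    }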
+ <!-- start class org.apache.hadoop.fs.ftp.FTPInputStream -->
+ <class name="FTPInputStream" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPInputStream" type="java.io.InputStream, org.apache.commons.net.ftp.FTPClient, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readLimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPInputStream -->
+</package>
+<package name="org.apache.hadoop.fs.kfs">
+ <!-- start class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+ <class name="KosmosFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="KosmosFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return null if the file doesn't exist; otherwise, get the
+ locations of the various chunks of the file from KFS.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A FileSystem backed by KFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
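A sketch of reaching KosmosFileSystem through a kfs:// URI; the metaserver host and port are placeholders, and a real deployment also needs the KFS client libraries on the classpath.

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class KfsDemo {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder KFS metaserver address.
        FileSystem fs =
            FileSystem.get(URI.create("kfs://meta.example.com:20000/"), conf);
        fs.mkdirs(new Path("/demo"));
        System.out.println(fs.getFileStatus(new Path("/demo")).isDir());
      }
    }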
+</package>
+<package name="org.apache.hadoop.fs.permission">
+ <!-- start class org.apache.hadoop.fs.permission.AccessControlException -->
+ <class name="AccessControlException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AccessControlException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The default constructor is needed for unwrapping from
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.AccessControlException -->
+ <!-- start class org.apache.hadoop.fs.permission.FsAction -->
+ <class name="FsAction" extends="java.lang.Enum&lt;org.apache.hadoop.fs.permission.FsAction&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.fs.permission.FsAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="implies" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[Return true if this action implies that action.
+ @param that the action that may be implied]]>
+ </doc>
+ </method>
+ <method name="and" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[AND operation.]]>
+ </doc>
+ </method>
+ <method name="or" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[OR operation.]]>
+ </doc>
+ </method>
+ <method name="not" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[NOT operation.]]>
+ </doc>
+ </method>
+ <field name="INDEX" type="int"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Octal representation]]>
+ </doc>
+ </field>
+ <field name="SYMBOL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Symbolic representation]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[File system actions, e.g. read, write, etc.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsAction -->
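The implies/and/or/not operations behave like bitwise operations on the rwx triple. A small sketch, assuming the standard FsAction constants (READ, WRITE, EXECUTE, and their combinations):

    import org.apache.hadoop.fs.permission.FsAction;

    public class FsActionDemo {
      public static void main(String[] args) {
        FsAction rw = FsAction.READ.or(FsAction.WRITE);  // READ_WRITE
        System.out.println(rw.implies(FsAction.READ));   // true
        System.out.println(rw.and(FsAction.WRITE));      // WRITE
        System.out.println(rw.not());                    // EXECUTE
      }
    }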
+ <!-- start class org.apache.hadoop.fs.permission.FsPermission -->
+ <class name="FsPermission" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from the given {@link FsAction} values.
+ @param u user action
+ @param g group action
+ @param o other action]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="short"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from the given mode.
+ @param mode the permission mode
+ @see #toShort()]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the permission to copy]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="permission" type="short"/>
+ <doc>
+ <![CDATA[Create an immutable {@link FsPermission} object.]]>
+ </doc>
+ </method>
+ <method name="getUserAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getGroupAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getOtherAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return other {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="fromShort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="short"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link FsPermission} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="toShort" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Encode the object to a short.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply a umask to this permission and return the masked permission.]]>
+ </doc>
+ </method>
+ <method name="getUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="setUMask"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Set the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="getDefault" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default permission.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unixSymbolicPermission" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create a FsPermission from a Unix symbolic permission string
+ @param unixSymbolicPermission e.g. "-rw-rw-rw-"]]>
+ </doc>
+ </method>
+ <field name="UMASK_LABEL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[umask property label]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_UMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A class for file/directory permissions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsPermission -->
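A sketch tying the constructors, valueOf(), and umask handling together; the concrete values are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class FsPermissionDemo {
      public static void main(String[] args) {
        // 0644: user rw-, group r--, other r--
        FsPermission p = new FsPermission(FsAction.READ_WRITE,
                                          FsAction.READ,
                                          FsAction.READ);
        System.out.println(p + " = 0" + Integer.toOctalString(p.toShort()));

        // Parse a Unix symbolic string; the leading character is the file type.
        FsPermission q = FsPermission.valueOf("-rw-rw-rw-");

        // Mask with the configured umask to get the effective permission.
        FsPermission umask = FsPermission.getUMask(new Configuration());
        System.out.println(q.applyUMask(umask));
      }
    }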
+ <!-- start class org.apache.hadoop.fs.permission.PermissionStatus -->
+ <class name="PermissionStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="PermissionStatus" type="java.lang.String, java.lang.String, org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <param name="group" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Create an immutable {@link PermissionStatus} object.]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user name]]>
+ </doc>
+ </method>
+ <method name="getGroupName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group name]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return permission]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply umask.
+ @see FsPermission#applyUMask(FsPermission)]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link PermissionStatus} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a {@link PermissionStatus} from its base components.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store permission related information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.PermissionStatus -->
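Because PermissionStatus implements Writable, it round-trips through any DataOutput/DataInput pair. A self-contained sketch using in-memory streams and made-up user and group names:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.fs.permission.PermissionStatus;

    public class PermissionStatusDemo {
      public static void main(String[] args) throws IOException {
        PermissionStatus status = new PermissionStatus(
            "alice", "staff", FsPermission.valueOf("-rw-r--r--"));

        // Serialize, then rebuild a copy from the bytes.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        status.write(new DataOutputStream(bytes));
        PermissionStatus copy = PermissionStatus.read(
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(copy.getUserName() + ":" + copy.getGroupName()
            + " " + copy.getPermission());
      }
    }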
+</package>
+<package name="org.apache.hadoop.fs.s3">
+ <!-- start class org.apache.hadoop.fs.s3.Block -->
+ <class name="Block" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Block" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Holds metadata about a block of data being stored in a {@link FileSystemStore}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.Block -->
+ <!-- start interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <interface name="FileSystemStore" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inode" type="org.apache.hadoop.fs.s3.INode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="inodeExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveINode" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveBlock" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="byteRangeStart" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listDeepSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="purge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete everything. Used for testing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="dump"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Diagnostic method to dump all INodes to the console.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for storing and retrieving {@link INode}s and {@link Block}s.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <!-- start class org.apache.hadoop.fs.s3.INode -->
+ <class name="INode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="INode" type="org.apache.hadoop.fs.s3.INode.FileType, org.apache.hadoop.fs.s3.Block[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.fs.s3.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileType" return="org.apache.hadoop.fs.s3.INode.FileType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSerializedLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="serialize" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deserialize" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="FILE_TYPES" type="org.apache.hadoop.fs.s3.INode.FileType[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DIRECTORY_INODE" type="org.apache.hadoop.fs.s3.INode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Holds file metadata, including the type (regular file or directory)
+ and the list of blocks that point to the data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.INode -->
+ <!-- start class org.apache.hadoop.fs.s3.MigrationTool -->
+ <class name="MigrationTool" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="MigrationTool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ This class is a tool for migrating data from an older to a newer version
+ of an S3 filesystem.
+ </p>
+ <p>
+ All files in the filesystem are migrated by rewriting the block metadata;
+ no data files are touched.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.MigrationTool -->
+ <!-- start class org.apache.hadoop.fs.s3.S3Credentials -->
+ <class name="S3Credentials" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Credentials"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@throws IllegalArgumentException if credentials for S3 cannot be
+ determined.]]>
+ </doc>
+ </method>
+ <method name="getAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSecretAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Extracts AWS credentials from the filesystem URI or configuration.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Credentials -->
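A sketch of resolving credentials from configuration; the property names fs.s3.awsAccessKeyId and fs.s3.awsSecretAccessKey follow the usual Hadoop S3 convention, and the bucket and key values are placeholders. Credentials may instead be embedded in the URI as s3://ID:SECRET@bucket.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3.S3Credentials;

    public class S3CredentialsDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Placeholder credentials set via configuration.
        conf.set("fs.s3.awsAccessKeyId", "MY_ACCESS_KEY");
        conf.set("fs.s3.awsSecretAccessKey", "MY_SECRET_KEY");

        S3Credentials credentials = new S3Credentials();
        credentials.initialize(URI.create("s3://demo-bucket"), conf);
        System.out.println(credentials.getAccessKey());
      }
    }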
+ <!-- start class org.apache.hadoop.fs.s3.S3Exception -->
+ <class name="S3Exception" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Exception" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown if there is a problem communicating with Amazon S3.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Exception -->
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystem -->
+ <class name="S3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="S3FileSystem" type="org.apache.hadoop.fs.s3.FileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[FileStatus for S3 file systems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A block-based {@link FileSystem} backed by
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ </p>
+ @see NativeS3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystem -->
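Usage mirrors any other FileSystem, reached through an s3:// URI; the bucket and path are placeholders, and credentials are resolved as described for S3Credentials above.

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class S3BlockFsDemo {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder bucket name.
        FileSystem fs = FileSystem.get(URI.create("s3://demo-bucket"), conf);
        FSDataOutputStream out = fs.create(new Path("/notes.txt"));
        try {
          out.writeBytes("stored as blocks, not as a plain S3 object\n");
        } finally {
          out.close();
        }
        System.out.println(fs.getFileStatus(new Path("/notes.txt")).getLen());
      }
    }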
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <class name="S3FileSystemException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystemException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when there is a fatal exception while using {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <!-- start class org.apache.hadoop.fs.s3.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="org.apache.hadoop.fs.s3.S3FileSystemException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when Hadoop cannot read the version of the data stored
+ in {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.VersionMismatchException -->
+</package>
+<package name="org.apache.hadoop.fs.s3native">
+ <!-- start class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
+ <class name="NativeS3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeS3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NativeS3FileSystem" type="org.apache.hadoop.fs.s3native.NativeFileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ If <code>f</code> is a file, this method will make a single call to S3.
+ If <code>f</code> is a directory, this method will make a maximum of
+ (<i>n</i> / 1000) + 2 calls to S3, where <i>n</i> is the total number of
+ files and directories contained directly in <code>f</code>.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} for reading and writing files stored on
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
+ stores files on S3 in their
+ native form so they can be read by other S3 tools.
+ </p>
+ @see org.apache.hadoop.fs.s3.S3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
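+ <!-- Illustrative sketch (hand-written, not JDiff output): a write/read round
+      trip through NativeS3FileSystem. The bucket, path, and demo class name
+      are invented; credentials are assumed to be configured through the usual
+      fs.s3n.* properties.
+
+      import java.net.URI;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.*;
+
+      public class NativeS3Demo {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.get(URI.create("s3n://example-bucket/"), conf);
+          Path p = new Path("/logs/events.txt");
+          FSDataOutputStream out = fs.create(p);
+          out.writeUTF("stored in native form, readable by other S3 tools");
+          out.close();
+          FSDataInputStream in = fs.open(p);  // objects keep their native form
+          System.out.println(in.readUTF());
+          in.close();
+        }
+      }
+ -->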
+</package>
+<package name="org.apache.hadoop.fs.shell">
+ <!-- start class org.apache.hadoop.fs.shell.Command -->
+ <class name="Command" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Command" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the command's name, excluding the leading '-' character.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the command on the input path
+
+ @param path the input path
+ @throws IOException if any error occurs]]>
+ </doc>
+ </method>
+ <method name="runAll" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[For each source path, execute the command
+
+ @return 0 if it runs successfully; -1 if it fails]]>
+ </doc>
+ </method>
+ <field name="args" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract class for the execution of a file system command]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Command -->
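+ <!-- Illustrative sketch (hand-written, not JDiff output): a hypothetical
+      Command subclass that prints the length of each input path. The class
+      name and command name are invented.
+
+      import java.io.IOException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.*;
+      import org.apache.hadoop.fs.shell.Command;
+
+      public class FileLength extends Command {
+        protected FileLength(Configuration conf) { super(conf); }
+        public String getCommandName() { return "filelength"; } // no leading '-'
+        protected void run(Path path) throws IOException {
+          FileSystem fs = path.getFileSystem(getConf());
+          System.out.println(path + "\t" + fs.getFileStatus(path).getLen());
+        }
+      }
+ -->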
+ <!-- start class org.apache.hadoop.fs.shell.CommandFormat -->
+ <class name="CommandFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CommandFormat" type="java.lang.String, int, int, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="parse" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="pos" type="int"/>
+ <doc>
+ <![CDATA[Parse parameters starting from the given position
+
+ @param args an array of input arguments
+ @param pos the position at which to start parsing
+ @return a list of parameters]]>
+ </doc>
+ </method>
+ <method name="getOpt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="option" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return if the option is set or not
+
+ @param option String representation of an option
+ @return true if the option is set; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Parse the args of a command and check the format of args.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.CommandFormat -->
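+ <!-- Illustrative sketch (hand-written, not JDiff output): parsing a command
+      line of the form "-count -q /user/alice". The option set and argument
+      values are invented.
+
+      String[] argv = { "-count", "-q", "/user/alice" };
+      CommandFormat fmt = new CommandFormat("count", 1, Integer.MAX_VALUE,
+                                            new String[] { "q" });
+      java.util.List<String> params = fmt.parse(argv, 1); // skip the command token
+      boolean quota = fmt.getOpt("q");  // true: -q was present
+      // params now holds ["/user/alice"]
+ -->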
+ <!-- start class org.apache.hadoop.fs.shell.Count -->
+ <class name="Count" extends="org.apache.hadoop.fs.shell.Command"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Count" type="java.lang.String[], int, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param cmd the count command
+ @param pos the starting index of the arguments]]>
+ </doc>
+ </constructor>
+ <method name="matches" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Check if a command is the count command
+
+ @param cmd A string representation of a command starting with "-"
+ @return true if this is a count command; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USAGE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DESCRIPTION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Count the number of directories, files, bytes, quota, and remaining quota.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Count -->
+</package>
+<package name="org.apache.hadoop.http">
+ <!-- start interface org.apache.hadoop.http.FilterContainer -->
+ <interface name="FilterContainer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="addFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[Add a filter to the container.
+ @param name Filter name
+ @param classname Filter class name
+ @param parameters a map from parameter names to initial values]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A container class for javax.servlet.Filter.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.http.FilterContainer -->
+ <!-- start class org.apache.hadoop.http.FilterInitializer -->
+ <class name="FilterInitializer" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterInitializer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Initialize a javax.servlet.Filter.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.FilterInitializer -->
+ <!-- start class org.apache.hadoop.http.HttpServer -->
+ <class name="HttpServer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.http.FilterContainer"/>
+ <constructor name="HttpServer" type="java.lang.String, java.lang.String, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as this(name, bindAddress, port, findPort, null);]]>
+ </doc>
+ </constructor>
+ <constructor name="HttpServer" type="java.lang.String, java.lang.String, int, boolean, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a status server on the given port.
+ The jsp scripts are taken from src/webapps/<name>.
+ @param name The name of the server
+ @param bindAddress The address to bind the server to
+ @param port The port to use on the server
+ @param findPort whether the server should start at the given port and
+ increment by 1 until it finds a free port.
+ @param conf Configuration]]>
+ </doc>
+ </constructor>
+ <method name="addDefaultApps"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="appDir" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add default apps.
+ @param appDir The application directory
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="addDefaultServlets"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Add default servlets.]]>
+ </doc>
+ </method>
+ <method name="addContext"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="dir" type="java.lang.String"/>
+ <param name="isFiltered" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a context
+ @param pathSpec The path spec for the context
+ @param dir The directory containing the context
+ @param isFiltered if true, the servlet is added to the filter path mapping
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Set a value in the webapp context. These values are available to the jsp
+ pages as "application.getAttribute(name)".
+ @param name The name of the attribute
+ @param value The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="addServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="clazz" type="java.lang.Class&lt;? extends javax.servlet.http.HttpServlet&gt;"/>
+ <doc>
+ <![CDATA[Add a servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param clazz The servlet class]]>
+ </doc>
+ </method>
+ <method name="addInternalServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="this is a temporary method">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="clazz" type="java.lang.Class&lt;? extends javax.servlet.http.HttpServlet&gt;"/>
+ <doc>
+ <![CDATA[Add an internal servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param clazz The servlet class
+ @deprecated this is a temporary method]]>
+ </doc>
+ </method>
+ <method name="addFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="defineFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="ctx" type="org.mortbay.jetty.servlet.WebApplicationContext"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <param name="urls" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Define a filter for a context and set up default url mappings.]]>
+ </doc>
+ </method>
+ <method name="addFilterPathMapping"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pathSpec" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add the path spec to the filter path mapping.
+ @param pathSpec The path spec]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value in the webapp context.
+ @param name The name of the attribute
+ @return The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="getWebAppsPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the pathname to the webapps files.
+ @return the pathname as a URL
+ @throws IOException if 'webapps' directory cannot be found on CLASSPATH.]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the port that the server is on
+ @return the port]]>
+ </doc>
+ </method>
+ <method name="setThreads"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="min" type="int"/>
+ <param name="max" type="int"/>
+ </method>
+ <method name="addSslListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="keystore" type="java.lang.String"/>
+ <param name="storPass" type="java.lang.String"/>
+ <param name="keyPass" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Configure an ssl listener on the server.
+ @param addr address to listen on
+ @param keystore location of the keystore
+ @param storPass password for the keystore
+ @param keyPass password for the key]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start the server. Does not wait for the server to start.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Stop the server.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="webServer" type="org.mortbay.jetty.Server"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="webAppContext" type="org.mortbay.jetty.servlet.WebApplicationContext"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="defaultContexts" type="java.util.Map&lt;org.mortbay.jetty.servlet.WebApplicationContext, java.lang.Boolean&gt;"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="findPort" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="listener" type="org.mortbay.http.SocketListener"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="filterNames" type="java.util.List&lt;java.lang.String&gt;"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Create a Jetty embedded server to answer http requests. The primary goal
+ is to serve up status information for the server.
+ There are three contexts:
+ "/logs/" -> points to the log directory
+ "/static/" -> points to common static files (src/webapps/static)
+ "/" -> the jsp server code from (src/webapps/<name>)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.HttpServer -->
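+ <!-- Illustrative sketch (hand-written, not JDiff output): starting an
+      embedded status server. The server name, bind address, and port are
+      invented, and a src/webapps/status directory is assumed to be present
+      on the classpath.
+
+      import org.apache.hadoop.http.HttpServer;
+
+      public class StatusServerDemo {
+        public static void main(String[] args) throws Exception {
+          HttpServer server = new HttpServer("status", "0.0.0.0", 50070, true);
+          server.addServlet("stacks2", "/stacks2", HttpServer.StackServlet.class);
+          server.setAttribute("started.at", System.currentTimeMillis());
+          server.start();  // returns without waiting for startup
+          System.out.println("listening on port " + server.getPort());
+        }
+      }
+ -->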
+ <!-- start class org.apache.hadoop.http.HttpServer.StackServlet -->
+ <class name="HttpServer.StackServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HttpServer.StackServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A very simple servlet to serve up a text representation of the current
+ stack traces. It both returns the stacks to the caller and logs them.
+ Currently the stack traces are captured sequentially rather than as one
+ atomic snapshot, so they may not all reflect exactly the same instant.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.HttpServer.StackServlet -->
+</package>
+<package name="org.apache.hadoop.io">
+ <!-- start class org.apache.hadoop.io.AbstractMapWritable -->
+ <class name="AbstractMapWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="AbstractMapWritable"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="addToMap"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a Class to the maps if it is not already present.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="byte"/>
+ <doc>
+ <![CDATA[@return the Class object for the specified id]]>
+ </doc>
+ </method>
+ <method name="getId" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[@return the id for the specified Class]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Used by child copy constructors.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the conf]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@param conf the conf to set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract base class for MapWritable and SortedMapWritable
+
+ Unlike org.apache.nutch.crawl.MapWritable, this class allows creation of
+ MapWritable&lt;Writable, MapWritable&gt; so the CLASS_TO_ID and ID_TO_CLASS
+ maps travel with the class instead of being static.
+
+ Class ids range from 1 to 127 so there can be at most 127 distinct classes
+ in any specific map instance.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.AbstractMapWritable -->
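+ <!-- Illustrative sketch (hand-written, not JDiff output): MapWritable, a
+      concrete subclass of AbstractMapWritable, carrying heterogeneous
+      Writable keys and values. The key/value contents are invented.
+
+      import org.apache.hadoop.io.*;
+
+      MapWritable map = new MapWritable();
+      map.put(new Text("count"), new IntWritable(42));
+      map.put(new Text("label"), new Text("spark"));
+      Writable v = map.get(new Text("count"));  // IntWritable(42)
+      // per-instance class ids (rather than static maps) are what allow
+      // nested maps such as MapWritable<Writable, MapWritable> to serialize
+ -->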
+ <!-- start class org.apache.hadoop.io.ArrayFile -->
+ <class name="ArrayFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A dense file-based mapping from integers to values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Reader -->
+ <class name="ArrayFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an array reader for the named file.]]>
+ </doc>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader before its <code>n</code>th value.]]>
+ </doc>
+ </method>
+ <method name="next" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read and return the next value in the file.]]>
+ </doc>
+ </method>
+ <method name="key" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the key associated with the most recent call to {@link
+ #seek(long)}, {@link #next(Writable)}, or {@link
+ #get(long,Writable)}.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the <code>n</code>th value in the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Reader -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Writer -->
+ <class name="ArrayFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a value to the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Writer -->
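+ <!-- Illustrative sketch (hand-written, not JDiff output): writing and
+      reading a dense integer-indexed ArrayFile on the local file system.
+      The file name is invented.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.io.*;
+
+      public class ArrayFileDemo {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.getLocal(conf);
+          ArrayFile.Writer writer =
+              new ArrayFile.Writer(conf, fs, "/tmp/demo.array", IntWritable.class);
+          writer.append(new IntWritable(7));   // index 0
+          writer.append(new IntWritable(11));  // index 1
+          writer.close();
+
+          ArrayFile.Reader reader = new ArrayFile.Reader(fs, "/tmp/demo.array", conf);
+          IntWritable value = new IntWritable();
+          reader.get(1, value);  // value.get() == 11
+          reader.close();
+        }
+      }
+ -->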
+ <!-- start class org.apache.hadoop.io.ArrayWritable -->
+ <class name="ArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ArrayWritable" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for arrays containing instances of a class. The elements of this
+ writable must all be instances of the same class. If this writable will be
+ the input for a Reducer, you will need to create a subclass that sets the
+ value to be of the proper type.
+
+ For example:
+ <code>
+ public class IntArrayWritable extends ArrayWritable {
+ public IntArrayWritable() {
+ super(IntWritable.class);
+ }
+ }
+ </code>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayWritable -->
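+ <!-- Illustrative sketch (hand-written, not JDiff output): constructing an
+      ArrayWritable from strings and converting back with toStrings(). The
+      values are invented.
+
+      ArrayWritable words = new ArrayWritable(new String[] { "alpha", "beta" });
+      String[] back = words.toStrings();  // { "alpha", "beta" }
+ -->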
+ <!-- start class org.apache.hadoop.io.BinaryComparable -->
+ <class name="BinaryComparable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable&lt;org.apache.hadoop.io.BinaryComparable&gt;"/>
+ <constructor name="BinaryComparable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLength" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return n such that bytes 0..n-1 from {@link #getBytes()} are valid.]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return representative byte array for this instance.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.BinaryComparable"/>
+ <doc>
+ <![CDATA[Compare bytes from {@link #getBytes()}.
+ @see org.apache.hadoop.io.WritableComparator#compareBytes(byte[],int,int,byte[],int,int)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Compare bytes from {@link #getBytes()} to those provided.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true if bytes from {@link #getBytes()} match.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a hash of the bytes returned from {@link #getBytes()}.
+ @see org.apache.hadoop.io.WritableComparator#hashBytes(byte[],int)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for {@link org.apache.hadoop.io.WritableComparable}
+ types that support ordering/permutation by a representative set of bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BinaryComparable -->
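+ <!-- Illustrative sketch (hand-written, not JDiff output): a hypothetical
+      fixed-width key that inherits byte-wise compareTo/equals/hashCode from
+      BinaryComparable.
+
+      import java.io.*;
+      import org.apache.hadoop.io.*;
+
+      public class RawKey extends BinaryComparable
+          implements WritableComparable<BinaryComparable> {
+        private final byte[] data = new byte[8];
+        public int getLength() { return data.length; }  // all 8 bytes are valid
+        public byte[] getBytes() { return data; }
+        public void write(DataOutput out) throws IOException { out.write(data); }
+        public void readFields(DataInput in) throws IOException { in.readFully(data); }
+      }
+ -->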
+ <!-- start class org.apache.hadoop.io.BooleanWritable -->
+ <class name="BooleanWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BooleanWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BooleanWritable" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="get" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for booleans.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable -->
+ <!-- start class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <class name="BooleanWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BooleanWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BooleanWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.BytesWritable -->
+ <class name="BytesWritable" extends="org.apache.hadoop.io.BinaryComparable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.io.BinaryComparable&gt;"/>
+ <constructor name="BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-size sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="BytesWritable" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a BytesWritable using the byte array as the initial value.
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the BytesWritable.
+ @return The data is only valid between 0 and getLength() - 1.]]>
+ </doc>
+ </method>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #getBytes()} instead.">
+ <doc>
+ <![CDATA[Get the data from the BytesWritable.
+ @deprecated Use {@link #getBytes()} instead.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current size of the buffer.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #getLength()} instead.">
+ <doc>
+ <![CDATA[Get the current size of the buffer.
+ @deprecated Use {@link #getLength()} instead.]]>
+ </doc>
+ </method>
+ <method name="setSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Change the size of the buffer. The values in the old range are preserved
+ and any new values are undefined. The capacity is changed if it is
+ necessary.
+ @param size The new number of bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum size that could be handled without
+ resizing the backing storage.
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_cap" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved.
+ @param new_cap The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="org.apache.hadoop.io.BytesWritable"/>
+ <doc>
+ <![CDATA[Set the BytesWritable to the contents of the given newData.
+ @param newData the value to set this BytesWritable to.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Set the value to a copy of the given byte range
+ @param newData the new values to copy in
+ @param offset the offset in newData to start at
+ @param length the number of bytes to copy]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Are the two byte sequences equal?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generate the stream of bytes as hex pairs separated by ' '.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is usable as a key or value.
+ It is resizable and distinguishes between the size of the sequence and
+ the current capacity. The hash function is the front of the MD5 of the
+ buffer. The sort order is the same as memcmp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable -->
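+ <!-- Illustrative sketch (hand-written, not JDiff output): the resizing
+      semantics of BytesWritable; the contents are invented.
+
+      BytesWritable buf = new BytesWritable(new byte[] { 1, 2, 3 });
+      buf.setCapacity(64);          // grow backing storage; existing data kept
+      buf.setSize(2);               // logical size: bytes 0..1 remain valid
+      byte[] raw = buf.getBytes();  // valid only up to getLength()
+ -->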
+ <!-- start class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <class name="BytesWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BytesWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BytesWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ByteWritable -->
+ <class name="ByteWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="ByteWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ByteWritable" type="byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Set the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a ByteWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two ByteWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for a single byte.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable -->
+ <!-- start class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <class name="ByteWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ByteWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for ByteWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <!-- start interface org.apache.hadoop.io.Closeable -->
+ <interface name="Closeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="use java.io.Closeable">
+ <implements name="java.io.Closeable"/>
+ <doc>
+ <![CDATA[@deprecated use java.io.Closeable]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Closeable -->
+ <!-- start class org.apache.hadoop.io.CompressedWritable -->
+ <class name="CompressedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="CompressedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ensureInflated"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Must be called by all methods which access fields to ensure that the data
+ has been uncompressed.]]>
+ </doc>
+ </method>
+ <method name="readFieldsCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #readFields(DataInput)}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #write(DataOutput)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base-class for Writables which store themselves compressed and lazily
+ inflate on field access. This is useful for large objects whose fields are
+ not be altered during a map or reduce operation: leaving the field data
+ compressed makes copying the instance from one file to another much
+ faster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.CompressedWritable -->
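+ <!-- Illustrative sketch (hand-written, not JDiff output): a hypothetical
+      CompressedWritable subclass; accessors call ensureInflated() before
+      touching fields, per the contract above.
+
+      import java.io.*;
+      import org.apache.hadoop.io.CompressedWritable;
+
+      public class CompressedPoint extends CompressedWritable {
+        private int x, y;
+        protected void readFieldsCompressed(DataInput in) throws IOException {
+          x = in.readInt();
+          y = in.readInt();
+        }
+        protected void writeCompressed(DataOutput out) throws IOException {
+          out.writeInt(x);
+          out.writeInt(y);
+        }
+        public int getX() { ensureInflated(); return x; }
+        public int getY() { ensureInflated(); return y; }
+      }
+ -->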
+ <!-- start class org.apache.hadoop.io.DataInputBuffer -->
+ <class name="DataInputBuffer" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataInputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataInput} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataInputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataInputBuffer buffer = new DataInputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using DataInput methods ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataInputBuffer -->
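A self-contained version of the usage loop from the javadoc above; the record layout (an int followed by a UTF string) is invented for illustration, and the reads work because DataInputBuffer extends DataInputStream:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import org.apache.hadoop.io.DataInputBuffer;

    public class DataInputBufferDemo {
      public static void main(String[] args) throws Exception {
        // Build some serialized bytes with plain java.io, standing in for
        // data arriving from a file or the network.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(42);
        out.writeUTF("hello");
        byte[] data = bytes.toByteArray();

        // One buffer is reused across iterations; reset() swaps in new data.
        DataInputBuffer buffer = new DataInputBuffer();
        buffer.reset(data, data.length);
        System.out.println(buffer.readInt());    // 42
        System.out.println(buffer.readUTF());    // hello
        System.out.println(buffer.getPosition() + " of " + buffer.getLength());
      }
    }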
+ <!-- start class org.apache.hadoop.io.DataOutputBuffer -->
+ <class name="DataOutputBuffer" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataOutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <constructor name="DataOutputBuffer" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from a DataInput directly into the buffer.]]>
+ </doc>
+ </method>
+ <method name="writeTo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes to a file stream.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataOutput} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataOutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataOutputBuffer buffer = new DataOutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using DataOutput methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataOutputBuffer -->
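The same loop, made concrete for the output side; pairing it with a DataInputBuffer shows the round trip. Note that only the first getLength() bytes of getData() are valid, as the javadoc warns:

    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    public class DataOutputBufferDemo {
      public static void main(String[] args) throws Exception {
        DataOutputBuffer out = new DataOutputBuffer();
        DataInputBuffer in = new DataInputBuffer();
        for (int i = 0; i < 3; i++) {
          out.reset();                    // reuse the same backing array
          out.writeInt(i);
          out.writeUTF("record-" + i);
          // Hand only the valid region straight to a reader.
          in.reset(out.getData(), out.getLength());
          System.out.println(in.readInt() + " " + in.readUTF());
        }
      }
    }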
+ <!-- start class org.apache.hadoop.io.DefaultStringifier -->
+ <class name="DefaultStringifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Stringifier&lt;T&gt;"/>
+ <constructor name="DefaultStringifier" type="org.apache.hadoop.conf.Configuration, java.lang.Class&lt;T&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="store"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="item" type="K"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the item in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to store
+ @param item the object to be stored
+ @param keyName the name of the key to use
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="load" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="storeArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="items" type="K[]"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the array of items in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param items the objects to be stored
+ @param keyName the name of the key to use
+ @throws IndexOutOfBoundsException if the items array is empty
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="loadArray" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the array of objects from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[DefaultStringifier is the default implementation of the {@link Stringifier}
+ interface which stringifies the objects using base64 encoding of the
+ serialized version of the objects. The {@link Serializer} and
+ {@link Deserializer} are obtained from the {@link SerializationFactory}.
+ <br>
+ DefaultStringifier offers convenience methods to store/load objects to/from
+ the configuration.
+
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DefaultStringifier -->
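A short sketch of the store/load convenience methods described above. The key name demo.key is arbitrary, and this assumes the default io.serializations setting, under which Writable types such as Text have a registered serializer:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.DefaultStringifier;
    import org.apache.hadoop.io.Text;

    public class StringifierDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The value is kept in the configuration as a base64 string.
        DefaultStringifier.store(conf, new Text("hello"), "demo.key");
        Text restored = DefaultStringifier.load(conf, "demo.key", Text.class);
        System.out.println(restored);    // hello
      }
    }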
+ <!-- start class org.apache.hadoop.io.DoubleWritable -->
+ <class name="DoubleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="DoubleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DoubleWritable" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="double"/>
+ </method>
+ <method name="get" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a DoubleWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Writable for Double values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable -->
+ <!-- start class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <class name="DoubleWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DoubleWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for DoubleWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.FloatWritable -->
+ <class name="FloatWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="FloatWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FloatWritable" type="float"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="float"/>
+ <doc>
+ <![CDATA[Set the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a FloatWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two FloatWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for floats.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable -->
+ <!-- start class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <class name="FloatWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FloatWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for FloatWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.GenericWritable -->
+ <class name="GenericWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="GenericWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Set the instance that is wrapped.
+
+ @param obj the Writable instance to wrap]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the wrapped instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTypes" return="java.lang.Class[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return all classes that may be wrapped. Subclasses should implement this
+ to return a constant array of classes.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper for Writable instances.
+ <p>
+ When two sequence files, which have the same Key type but different Value
+ types, are mapped out to reduce, multiple Value types are not allowed.
+ In this case, this class can help you wrap instances with different types.
+ </p>
+
+ <p>
+ Compared with <code>ObjectWritable</code>, this class is much more efficient,
+ because <code>ObjectWritable</code> will append the class declaration as a String
+ into the output file in every Key-Value pair.
+ </p>
+
+ <p>
+ Generic Writable implements {@link Configurable} interface, so that it will be
+ configured by the framework. The configuration is passed to the wrapped objects
+ implementing {@link Configurable} interface <i>before deserialization</i>.
+ </p>
+
+ How to use it: <br>
+ 1. Write your own class, such as GenericObject, which extends GenericWritable.<br>
+ 2. Implement the abstract method <code>getTypes()</code>, which defines
+ the classes that will be wrapped in GenericObject in your application.
+ Attention: the classes defined in the <code>getTypes()</code> method must
+ implement the <code>Writable</code> interface.
+ <br><br>
+
+ The code looks like this:
+ <blockquote><pre>
+ public class GenericObject extends GenericWritable {
+
+ private static Class[] CLASSES = {
+ ClassType1.class,
+ ClassType2.class,
+ ClassType3.class,
+ };
+
+ protected Class[] getTypes() {
+ return CLASSES;
+ }
+
+ }
+ </pre></blockquote>
+
+ @since Nov 8, 2006]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.GenericWritable -->
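Rounding the javadoc's GenericObject fragment out into a runnable sketch; the two wrapped types and the buffer-based round trip are illustrative choices, not part of the API:

    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.GenericWritable;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;

    public class GenericObject extends GenericWritable {
      private static final Class[] CLASSES = { IntWritable.class, Text.class };

      protected Class[] getTypes() {
        return CLASSES;
      }

      public static void main(String[] args) throws Exception {
        GenericObject wrapped = new GenericObject();
        wrapped.set(new Text("payload"));   // wrap a Text value

        DataOutputBuffer out = new DataOutputBuffer();
        wrapped.write(out);                 // a compact type tag plus the value

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        GenericObject read = new GenericObject();
        read.readFields(in);
        System.out.println(read.get());     // payload
      }
    }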
+ <!-- start class org.apache.hadoop.io.InputBuffer -->
+ <class name="InputBuffer" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link InputStream} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new InputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ InputBuffer buffer = new InputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using InputStream methods ...
+ }
+ </pre>
+ @see DataInputBuffer
+ @see DataOutput]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.InputBuffer -->
+ <!-- start class org.apache.hadoop.io.IntWritable -->
+ <class name="IntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="IntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="IntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an IntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two IntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for ints.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable -->
+ <!-- start class org.apache.hadoop.io.IntWritable.Comparator -->
+ <class name="IntWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IntWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for IntWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable.Comparator -->
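The per-type Comparator classes in this file all follow the same raw-bytes pattern: compare serialized values without deserializing them. A sketch of how a registered comparator is looked up and applied; the two buffers are built here purely for illustration:

    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.WritableComparator;

    public class RawCompareDemo {
      public static void main(String[] args) throws Exception {
        // Serialize two values into separate buffers.
        DataOutputBuffer a = new DataOutputBuffer();
        new IntWritable(7).write(a);
        DataOutputBuffer b = new DataOutputBuffer();
        new IntWritable(42).write(b);

        // The optimized comparator works directly on the raw bytes.
        WritableComparator cmp = WritableComparator.get(IntWritable.class);
        int result = cmp.compare(a.getData(), 0, a.getLength(),
                                 b.getData(), 0, b.getLength());
        System.out.println(result < 0);    // true: 7 < 42
      }
    }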
+ <!-- start class org.apache.hadoop.io.IOUtils -->
+ <class name="IOUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="buffSize" type="int"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param buffSize the size of the buffer
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another. <strong>closes the input and output streams
+ at the end</strong>.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads len bytes in a loop.
+ @param in The InputStream to read from
+ @param buf The buffer to fill
+ @param off offset into the buffer
+ @param len the length of bytes to read
+ @throws IOException if it could not read requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Similar to readFully(). Skips bytes in a loop.
+ @param in The InputStream to skip bytes from
+ @param len number of bytes to skip.
+ @throws IOException if it could not skip requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="closeables" type="java.io.Closeable[]"/>
+ <doc>
+ <![CDATA[Close the Closeable objects and <b>ignore</b> any {@link IOException} or
+ null pointers. Must only be used for cleanup in exception handlers.
+ @param log the log to record problems to at debug level. Can be null.
+ @param closeables the objects to close]]>
+ </doc>
+ </method>
+ <method name="closeStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Closeable"/>
+ <doc>
+ <![CDATA[Closes the stream ignoring {@link IOException}.
+ Must only be called in cleaning up from exception handlers.
+ @param stream the Stream to close]]>
+ </doc>
+ </method>
+ <method name="closeSocket"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <doc>
+ <![CDATA[Closes the socket, ignoring {@link IOException}.
+ @param sock the Socket to close]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility class for I/O related functionality.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils -->
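A minimal use of the four-argument copyBytes() above; the file names are hypothetical, and passing close=true delegates stream cleanup to the finally clause the javadoc mentions:

    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.io.IOUtils;

    public class CopyDemo {
      public static void main(String[] args) throws Exception {
        InputStream in = new FileInputStream("input.dat");     // hypothetical
        OutputStream out = new FileOutputStream("output.dat"); // hypothetical
        // Copy with a 4 KB buffer; both streams are closed on completion
        // or failure.
        IOUtils.copyBytes(in, out, 4096, true);
      }
    }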
+ <!-- start class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <class name="IOUtils.NullOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils.NullOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[/dev/null of OutputStreams.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <!-- start class org.apache.hadoop.io.LongWritable -->
+ <class name="LongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="LongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a LongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two LongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable -->
+ <!-- start class org.apache.hadoop.io.LongWritable.Comparator -->
+ <class name="LongWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <class name="LongWritable.DecreasingComparator" extends="org.apache.hadoop.io.LongWritable.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.DecreasingComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A decreasing Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <!-- start class org.apache.hadoop.io.MapFile -->
+ <class name="MapFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="oldName" type="java.lang.String"/>
+ <param name="newName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames an existing map directory.]]>
+ </doc>
+ </method>
+ <method name="delete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deletes the named map file.]]>
+ </doc>
+ </method>
+ <method name="fix" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"/>
+ <param name="valueClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"/>
+ <param name="dryrun" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This method attempts to fix a corrupt MapFile by re-creating its index.
+ @param fs filesystem
+ @param dir directory containing the MapFile data and index
+ @param keyClass key class (has to be a subclass of Writable)
+ @param valueClass value class (has to be a subclass of Writable)
+ @param dryrun do not perform any changes, just report what needs to be done
+ @return number of valid entries in this MapFile, or -1 if no fixing was needed
+ @throws Exception]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="INDEX_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the index file.]]>
+ </doc>
+ </field>
+ <field name="DATA_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the data file.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A file-based map from keys to values.
+
+ <p>A map is a directory containing two files, the <code>data</code> file,
+ containing all keys and values in the map, and a smaller <code>index</code>
+ file, containing a fraction of the keys. The fraction is determined by
+ {@link Writer#getIndexInterval()}.
+
+ <p>The index file is read entirely into memory. Thus key implementations
+ should try to keep themselves small.
+
+ <p>Map files are created by adding entries in-order. To maintain a large
+ database, perform updates by copying the previous version of a database and
+ merging in a sorted change list, to create a new version of the database in
+ a new file. Sorting large change lists can be done with {@link
+ SequenceFile.Sorter}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile -->
+ <!-- start class org.apache.hadoop.io.MapFile.Reader -->
+ <class name="MapFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map using the named comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Hook to allow subclasses to defer opening streams until further
+ initialization is complete.
+ @see #createDataFileReader(FileSystem, Path, Configuration)]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="open"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dirName" type="java.lang.String"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDataFileReader" return="org.apache.hadoop.io.SequenceFile.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dataFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link SequenceFile.Reader} returned.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Re-positions the reader before its first key.]]>
+ </doc>
+ </method>
+ <method name="midKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the key at approximately the middle of the file.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalKey"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the final key from the file.
+
+ @param key key to read into]]>
+ </doc>
+ </method>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader at the named key, or if none such exists, at the
+ first entry after the named key. Returns true iff the named key exists
+ in this map.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the map into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ the end of the map.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the value for the named key, or null if none exists.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+ Returns <code>key</code> or, if it does not exist, the first entry
+ after the named key.
+
+ @param key - key that we're trying to find
+ @param val - data value if key is found
+ @return - the key that was the closest match or null if eof.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <param name="before" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+
+ @param key - key that we're trying to find
+ @param val - data value if key is found
+ @param before - if true, and <code>key</code> does not exist, return
+ the first entry that falls just before the <code>key</code>. Otherwise,
+ return the record that sorts just after.
+ @return - the key that was the closest match or null if eof.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Reader -->
+ <!-- start class org.apache.hadoop.io.MapFile.Writer -->
+ <class name="MapFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <method name="getIndexInterval" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of entries that are added before an index entry is added.]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval and stores it in conf.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair to the map. The key must be greater than or equal
+ to the previous key added to the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writes a new map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Writer -->
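Putting MapFile.Writer and MapFile.Reader together in one sketch. The directory name demo.map and the key range are invented; note that append() requires keys in increasing order, and lookups go through the in-memory index:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.MapFile;
    import org.apache.hadoop.io.Text;

    public class MapFileDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        String dir = "demo.map";            // hypothetical map directory

        // Keys must be appended in sorted order.
        MapFile.Writer writer =
            new MapFile.Writer(conf, fs, dir, IntWritable.class, Text.class);
        for (int i = 0; i < 100; i++) {
          writer.append(new IntWritable(i), new Text("value-" + i));
        }
        writer.close();

        // Random access by key.
        MapFile.Reader reader = new MapFile.Reader(fs, dir, conf);
        Text value = new Text();
        reader.get(new IntWritable(42), value);
        System.out.println(value);          // value-42
        reader.close();
      }
    }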
+ <!-- start class org.apache.hadoop.io.MapWritable -->
+ <class name="MapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Map&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapWritable" type="org.apache.hadoop.io.MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.Writable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable Map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapWritable -->
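+      <!-- Illustrative sketch (not part of the generated API record): MapWritable
+           is a java.util.Map keyed and valued by Writables, so a whole map can
+           be serialized as one value.
+
+             MapWritable m = new MapWritable();
+             m.put(new Text("count"), new IntWritable(42));
+             IntWritable v = (IntWritable) m.get(new Text("count"));
+             MapWritable copy = new MapWritable(m);   // copy constructor
+      -->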
+ <!-- start class org.apache.hadoop.io.MD5Hash -->
+ <class name="MD5Hash" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.io.MD5Hash&gt;"/>
+ <constructor name="MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash from a hex string.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash with a specified value.]]>
+ </doc>
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs, reads and returns an instance.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Copy the contents of another instance into this instance.]]>
+ </doc>
+ </method>
+ <method name="getDigest" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the digest bytes.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a hash value for the content from the InputStream.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+        <![CDATA[Construct a hash value for a UTF8 value.]]>
+ </doc>
+ </method>
+ <method name="halfDigest" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Construct a half-sized version of this MD5 that fits in a long.]]>
+ </doc>
+ </method>
+ <method name="quarterDigest" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a 32-bit digest of the MD5.
+ @return the first 4 bytes of the md5]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an MD5Hash whose digest contains the
+ same values.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for this object.
+ Only uses the first 4 bytes, since md5s are evenly distributed.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Compares this object with the specified object for order.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="setDigest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the digest value from a hex string.]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Writable for MD5 hash values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash -->
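+      <!-- Illustrative sketch (not part of the generated API record): computing
+           digests with the static factories documented above.
+
+             MD5Hash h1 = MD5Hash.digest("hello");
+             MD5Hash h2 = MD5Hash.digest("hello");
+             boolean same = h1.equals(h2);    // true: identical digest bytes
+             String hex = h1.toString();      // hex representation
+             int small = h1.quarterDigest();  // first 4 bytes of the md5
+      -->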
+ <!-- start class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <class name="MD5Hash.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5Hash.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for MD5Hash keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <!-- start class org.apache.hadoop.io.MultipleIOException -->
+ <class name="MultipleIOException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getExceptions" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the underlying exceptions]]>
+ </doc>
+ </method>
+ <method name="createIOException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="exceptions" type="java.util.List&lt;java.io.IOException&gt;"/>
+ <doc>
+        <![CDATA[A convenience method to create an {@link IOException}.]]>
+ </doc>
+ </method>
+ <doc>
+        <![CDATA[Encapsulates a list of {@link IOException}s in a single {@link IOException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MultipleIOException -->
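+      <!-- Illustrative sketch (not part of the generated API record): collecting
+           several failures and surfacing them as one exception.
+
+             List<IOException> errs = new ArrayList<IOException>();
+             errs.add(new IOException("disk 1 failed"));
+             errs.add(new IOException("disk 2 failed"));
+             IOException combined = MultipleIOException.createIOException(errs);
+             // getExceptions() on a MultipleIOException recovers the list
+      -->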
+ <!-- start class org.apache.hadoop.io.NullWritable -->
+ <class name="NullWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <method name="get" return="org.apache.hadoop.io.NullWritable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the single instance of this class.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Singleton Writable with no data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable -->
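+      <!-- Illustrative sketch (not part of the generated API record):
+           NullWritable is useful where a key or value slot must be filled but
+           carries no data.
+
+             NullWritable nothing = NullWritable.get();        // the single instance
+             boolean same = (nothing == NullWritable.get());   // true: singleton
+      -->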
+ <!-- start class org.apache.hadoop.io.NullWritable.Comparator -->
+ <class name="NullWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator &quot;optimized&quot; for NullWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ObjectWritable -->
+ <class name="ObjectWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ObjectWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Class, java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the instance, or null if none.]]>
+ </doc>
+ </method>
+ <method name="getDeclaredClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Return the declared class of the wrapped instance.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Reset the instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeObject"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="instance" type="java.lang.Object"/>
+ <param name="declaredClass" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="objectWritable" type="org.apache.hadoop.io.ObjectWritable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+        <![CDATA[A polymorphic Writable that writes an instance with its class name.
+ Handles arrays, strings and primitive types without a Writable wrapper.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ObjectWritable -->
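+      <!-- Illustrative sketch (not part of the generated API record):
+           round-tripping a value through the static writeObject/readObject pair
+           documented above, with DataOutputBuffer/DataInputBuffer assumed as
+           the in-memory streams.
+
+             Configuration conf = new Configuration();
+             DataOutputBuffer out = new DataOutputBuffer();
+             ObjectWritable.writeObject(out, "hello", String.class, conf);
+             DataInputBuffer in = new DataInputBuffer();
+             in.reset(out.getData(), out.getLength());
+             Object copy = ObjectWritable.readObject(in, conf);  // "hello"
+      -->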
+ <!-- start class org.apache.hadoop.io.OutputBuffer -->
+ <class name="OutputBuffer" extends="java.io.FilterOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.OutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Writes bytes from an InputStream directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link OutputStream} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new OutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ OutputBuffer buffer = new OutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using OutputStream methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>
+ @see DataOutputBuffer
+ @see InputBuffer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.OutputBuffer -->
+ <!-- start interface org.apache.hadoop.io.RawComparator -->
+ <interface name="RawComparator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Comparator&lt;T&gt;"/>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link Comparator} that operates directly on byte representations of
+ objects.
+ </p>
+ @param <T>
+ @see DeserializerComparator]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.RawComparator -->
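+      <!-- Illustrative sketch (not part of the generated API record): a raw
+           comparator for 4-byte big-endian int keys that orders records without
+           deserializing them; IntRawComparator is a hypothetical name.
+
+             public class IntRawComparator implements RawComparator<IntWritable> {
+               public int compare(byte[] b1, int s1, int l1,
+                                  byte[] b2, int s2, int l2) {
+                 int i1 = WritableComparator.readInt(b1, s1);
+                 int i2 = WritableComparator.readInt(b2, s2);
+                 return (i1 < i2) ? -1 : ((i1 == i2) ? 0 : 1);
+               }
+               public int compare(IntWritable a, IntWritable b) {
+                 return a.compareTo(b);
+               }
+             }
+      -->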
+ <!-- start class org.apache.hadoop.io.SequenceFile -->
+ <class name="SequenceFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the compression type for the reduce outputs
+ @param job the job config to look in
+ @return the kind of compression to use
+ @deprecated Use
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+      deprecated="Use one of the many SequenceFile.createWriter methods to specify
+      the {@link CompressionType} while creating the {@link SequenceFile} or
+      {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+      to specify the {@link CompressionType} for job-outputs.">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the compression type for sequence files.
+ @param job the configuration to modify
+ @param val the new compression type (none, block, record)
+        @deprecated Use one of the many SequenceFile.createWriter methods to specify
+        the {@link CompressionType} while creating the {@link SequenceFile} or
+        {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+        to specify the {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+        @param bufferSize buffer size for the underlying output stream.
+ @param replication replication factor for the file.
+ @param blockSize block size for the file.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+        @param out The stream on top of which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+        @param out The stream on top of which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="SYNC_INTERVAL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes between sync points.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<code>SequenceFile</code>s are flat files consisting of binary key/value
+ pairs.
+
+ <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
+ {@link Sorter} classes for writing, reading and sorting respectively.</p>
+
+ There are three <code>SequenceFile</code> <code>Writer</code>s based on the
+ {@link CompressionType} used to compress key/value pairs:
+ <ol>
+ <li>
+ <code>Writer</code> : Uncompressed records.
+ </li>
+ <li>
+ <code>RecordCompressWriter</code> : Record-compressed files, only compress
+ values.
+ </li>
+ <li>
+ <code>BlockCompressWriter</code> : Block-compressed files, both keys &
+ values are collected in 'blocks'
+ separately and compressed. The size of
+ the 'block' is configurable.
+ </ol>
+
+ <p>The actual compression algorithm used to compress key and/or values can be
+ specified by using the appropriate {@link CompressionCodec}.</p>
+
+ <p>The recommended way is to use the static <tt>createWriter</tt> methods
+      provided by the <code>SequenceFile</code> to choose the preferred format.</p>
+
+ <p>The {@link Reader} acts as the bridge and can read any of the above
+ <code>SequenceFile</code> formats.</p>
+
+ <h4 id="Formats">SequenceFile Formats</h4>
+
+ <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
+ depending on the <code>CompressionType</code> specified. All of them share a
+ <a href="#Header">common header</a> described below.
+
+ <h5 id="Header">SequenceFile Header</h5>
+ <ul>
+ <li>
+ version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual
+ version number (e.g. SEQ4 or SEQ6)
+ </li>
+ <li>
+      keyClassName - key class
+ </li>
+ <li>
+ valueClassName - value class
+ </li>
+ <li>
+ compression - A boolean which specifies if compression is turned on for
+ keys/values in this file.
+ </li>
+ <li>
+ blockCompression - A boolean which specifies if block-compression is
+ turned on for keys/values in this file.
+ </li>
+ <li>
+ compression codec - <code>CompressionCodec</code> class which is used for
+ compression of keys and/or values (if compression is
+ enabled).
+ </li>
+ <li>
+ metadata - {@link Metadata} for this file.
+ </li>
+ <li>
+ sync - A sync marker to denote end of the header.
+ </li>
+ </ul>
+
+      <h5 id="UncompressedFormat">Uncompressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li>Value</li>
+ </ul>
+ </li>
+ <li>
+      A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+      <h5 id="RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li><i>Compressed</i> Value</li>
+ </ul>
+ </li>
+ <li>
+      A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+      <h5 id="BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record <i>Block</i>
+ <ul>
+ <li>Compressed key-lengths block-size</li>
+ <li>Compressed key-lengths block</li>
+ <li>Compressed keys block-size</li>
+ <li>Compressed keys block</li>
+ <li>Compressed value-lengths block-size</li>
+ <li>Compressed value-lengths block</li>
+ <li>Compressed values block-size</li>
+ <li>Compressed values block</li>
+ </ul>
+ </li>
+ <li>
+      A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <p>The compressed blocks of key lengths and value lengths consist of the
+ actual lengths of individual keys/values encoded in ZeroCompressedInteger
+ format.</p>
+
+ @see CompressionCodec]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile -->
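+      <!-- Illustrative sketch (not part of the generated API record): creating
+           a block-compressed writer through the recommended static factory; the
+           path "example.seq" is a placeholder.
+
+             SequenceFile.Writer w = SequenceFile.createWriter(fs, conf,
+                 new Path("example.seq"), Text.class, IntWritable.class,
+                 SequenceFile.CompressionType.BLOCK);
+             w.append(new Text("k"), new IntWritable(1));
+             w.close();
+      -->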
+ <!-- start class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <class name="SequenceFile.CompressionType" extends="java.lang.Enum&lt;org.apache.hadoop.io.SequenceFile.CompressionType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.SequenceFile.CompressionType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression type used to compress key/value pairs in the
+ {@link SequenceFile}.
+
+ @see SequenceFile.Writer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.Metadata -->
+ <class name="SequenceFile.Metadata" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFile.Metadata" type="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="getMetadata" return="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+        <![CDATA[The class encapsulating the metadata of a file.
+ The metadata of a file is a list of attribute name/value
+ pairs of Text type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Metadata -->
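+      <!-- Illustrative sketch (not part of the generated API record): attaching
+           free-form Text attributes to a file at creation time.
+
+             SequenceFile.Metadata meta = new SequenceFile.Metadata();
+             meta.set(new Text("creator"), new Text("example-job"));
+             Text who = meta.get(new Text("creator"));
+      -->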
+ <!-- start class org.apache.hadoop.io.SequenceFile.Reader -->
+ <class name="SequenceFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Reader" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the named file.]]>
+ </doc>
+ </constructor>
+ <method name="openFile" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link FSDataInputStream} returned.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the key class.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the value class.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="isCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if values are compressed.]]>
+ </doc>
+ </method>
+ <method name="isBlockCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if records are block-compressed.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="getMetadata" return="org.apache.hadoop.io.SequenceFile.Metadata"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Returns the metadata object of the file.]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file into <code>key</code>, skipping its
+ value. True if another entry exists, and false at end of file.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the file into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+        end of file.]]>
+ </doc>
+ </method>
+ <method name="next" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.">
+ <param name="buffer" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.]]>
+ </doc>
+ </method>
+ <method name="createValueBytes" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRaw" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' records.
+ @param key - The buffer into which the key is read
+ @param val - The 'raw' value
+ @return Returns the total record length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawKey" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' keys.
+ @param key - The buffer into which the key is read
+ @return Returns the key length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file, skipping its
+ value. Return null at end of file.]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' values.
+ @param val - The 'raw' value
+ @return Returns the value length
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the current byte position in the input file.
+
+ <p>The position passed must be a position returned by {@link
+ SequenceFile.Writer#getLength()} when writing this file. To seek to an arbitrary
+ position, use {@link SequenceFile.Reader#sync(long)}.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the next sync mark past a given position.]]>
+ </doc>
+ </method>
+ <method name="syncSeen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true iff the previous call to next passed a sync mark.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current byte position in the input file.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reads key/value pairs from a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Reader -->
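+      <!-- Illustrative sketch (not part of the generated API record): the common
+           read loop over a sequence file, using the constructor and
+           next(key, val) documented above.
+
+             SequenceFile.Reader r =
+                 new SequenceFile.Reader(fs, new Path("example.seq"), conf);
+             Text k = new Text();
+             IntWritable v = new IntWritable();
+             while (r.next(k, v)) {
+               // process k and v; the same instances are refilled each call
+             }
+             r.close();
+      -->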
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter -->
+ <class name="SequenceFile.Sorter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge files containing the named classes.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.io.RawComparator, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge using an arbitrary {@link RawComparator}.]]>
+ </doc>
+ </constructor>
+ <method name="setFactor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="factor" type="int"/>
+ <doc>
+ <![CDATA[Set the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="getFactor" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="setMemory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="memory" type="int"/>
+ <doc>
+ <![CDATA[Set the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="getMemory" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setProgressable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progressable" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Set the progressable object in order to report progress.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files into an output file.
+ @param inFiles the files to be sorted
+ @param outFile the sorted output file
+ @param deleteInput should the input files be deleted as they are read?]]>
+ </doc>
+ </method>
+ <method name="sortAndIterate" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files and return an iterator.
+ @param inFiles the files to be sorted
+ @param tempDir the directory where temp files are created during sort
+ @param deleteInput should the input files be deleted as they are read?
+ @return the RawKeyValueIterator]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The backwards-compatible interface to sort.
+ @param inFile the input file to sort
+ @param outFile the sorted output file]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="segments" type="java.util.List&lt;org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor&gt;"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the list of segments of type <code>SegmentDescriptor</code>
+ @param segments the list of SegmentDescriptors
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of the files passed in Path[], using the merge
+ factor that has already been set.
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="factor" type="int"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of the files passed in Path[].
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param factor the factor that will be used as the maximum merge fan-in
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInputs" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of the files passed in Path[].
+ @param inNames the array of path names
+ @param tempDir the directory for creating temp files during merge
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cloneFileAttributes" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="prog" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clones the attributes (like compression) of the input file and creates a
+ corresponding Writer.
+ @param inputFile the path of the input file whose attributes should be
+ cloned
+ @param outputFile the path of the output file
+ @param prog the Progressable to report status during the file write
+ @return Writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="records" type="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"/>
+ <param name="writer" type="org.apache.hadoop.io.SequenceFile.Writer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes records from RawKeyValueIterator into a file represented by the
+ passed writer
+ @param records the RawKeyValueIterator
+ @param writer the Writer created earlier
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merge the provided files.
+ @param inFiles the array of input path names
+ @param outFile the final output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sorts key/value pairs in a sequence-format file.
+
+ <p>For best performance, applications should make sure that the {@link
+ Writable#readFields(DataInput)} implementation of their keys is
+ very efficient. In particular, it should avoid allocating memory.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter -->
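+ <!-- Usage sketch: a minimal example of a disk-based sort with SequenceFile.Sorter,
+      using the setFactor/setMemory knobs described above. The input/output paths and
+      the Text/IntWritable classes are illustrative assumptions.
+
+      import java.io.IOException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.io.IntWritable;
+      import org.apache.hadoop.io.SequenceFile;
+      import org.apache.hadoop.io.Text;
+
+      public class SortSeqFiles {
+        public static void main(String[] args) throws IOException {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.get(conf);
+          SequenceFile.Sorter sorter =
+              new SequenceFile.Sorter(fs, Text.class, IntWritable.class, conf);
+          sorter.setFactor(100);                // merge up to 100 streams at once
+          sorter.setMemory(64 * 1024 * 1024);   // 64 MB of buffer memory
+          Path[] in = { new Path("part-0.seq"), new Path("part-1.seq") };
+          sorter.sort(in, new Path("sorted.seq"), false);   // false: keep the inputs
+        }
+      }
+ -->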
+ <!-- start interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
+ <interface name="SequenceFile.Sorter.RawKeyValueIterator" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw key
+ @return DataOutputBuffer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getValue" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw value
+ @return ValueBytes
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets up the current key and value (for getKey and getValue)
+ @return true if there exists a key/value, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the iterator so that the underlying streams can be closed.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the Progress object; this has a float (0.0 - 1.0)
+ indicating the bytes processed by the iterator so far]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to iterate over raw keys/values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
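+ <!-- Usage sketch: consuming a RawKeyValueIterator produced by Sorter.merge and
+      draining it into a Writer cloned from an input file via cloneFileAttributes
+      and writeFile. Assumes fs and conf are set up as in the Sorter sketch above;
+      the paths are illustrative.
+
+      SequenceFile.Sorter sorter =
+          new SequenceFile.Sorter(fs, Text.class, IntWritable.class, conf);
+      Path[] in = { new Path("sorted-0.seq"), new Path("sorted-1.seq") };
+      SequenceFile.Sorter.RawKeyValueIterator it =
+          sorter.merge(in, false, new Path("tmp"));          // false: keep the inputs
+      SequenceFile.Writer out =
+          sorter.cloneFileAttributes(in[0], new Path("merged.seq"), null);
+      try {
+        sorter.writeFile(it, out);   // writes every remaining raw key/value pair
+      } finally {
+        it.close();
+        out.close();
+      }
+ -->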
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <class name="SequenceFile.Sorter.SegmentDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="SequenceFile.Sorter.SegmentDescriptor" type="long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a segment
+ @param segmentOffset the offset of the segment in the file
+ @param segmentLength the length of the segment
+ @param segmentPathName the path name of the file containing the segment]]>
+ </doc>
+ </constructor>
+ <method name="doSync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do the sync checks]]>
+ </doc>
+ </method>
+ <method name="preserveInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="preserve" type="boolean"/>
+ <doc>
+ <![CDATA[Sets whether the input file should be preserved, i.e. not deleted, when it is no longer needed.]]>
+ </doc>
+ </method>
+ <method name="shouldPreserveInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRawKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the rawKey object with the key returned by the Reader
+ @return true if there is a key returned; false, otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rawValue" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the passed rawValue with the value corresponding to the key
+ read earlier.
+ @param rawValue the ValueBytes object to fill
+ @return the length of the value
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the stored rawKey]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The default cleanup. Subclasses can override this with a custom
+ cleanup.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class defines a merge segment. It can be subclassed to
+ provide a customized cleanup method; in the default implementation,
+ cleanup closes the file handle and deletes the file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
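+ <!-- Usage sketch: merging an explicit list of SegmentDescriptors. SegmentDescriptor
+      is a non-static inner class of Sorter, so instances are created through an
+      enclosing Sorter instance. Assumes fs, conf, and java.util.{List,ArrayList}
+      imports; the path and offsets are illustrative.
+
+      SequenceFile.Sorter sorter =
+          new SequenceFile.Sorter(fs, Text.class, IntWritable.class, conf);
+      Path segPath = new Path("sorted-0.seq");
+      long segLen = fs.getFileStatus(segPath).getLen();
+      SequenceFile.Sorter.SegmentDescriptor sd =
+          sorter.new SegmentDescriptor(0L, segLen, segPath);   // whole file as one segment
+      sd.preserveInput(true);                                  // keep the file after merging
+      List<SequenceFile.Sorter.SegmentDescriptor> segs =
+          new ArrayList<SequenceFile.Sorter.SegmentDescriptor>();
+      segs.add(sd);
+      SequenceFile.Sorter.RawKeyValueIterator it = sorter.merge(segs, new Path("tmp"));
+ -->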
+ <!-- start interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
+ <interface name="SequenceFile.ValueBytes" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the uncompressed bytes to the outStream.
+ @param outStream the stream to write uncompressed bytes into
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to outStream.
+ Note that it will NOT compress the bytes if they are not already compressed.
+ @param outStream the stream to write compressed bytes into]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Size of stored data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to 'raw' values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
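+ <!-- Usage sketch: a hypothetical ValueBytes implementation backed by a plain byte
+      array, suitable for feeding Writer.appendRaw. Since the data is uncompressed,
+      writeCompressedBytes rejects the call, as the interface contract allows.
+
+      import java.io.DataOutputStream;
+      import java.io.IOException;
+      import org.apache.hadoop.io.SequenceFile;
+
+      public class ByteArrayValueBytes implements SequenceFile.ValueBytes {
+        private final byte[] data;
+
+        public ByteArrayValueBytes(byte[] data) { this.data = data; }
+
+        public void writeUncompressedBytes(DataOutputStream outStream) throws IOException {
+          outStream.write(data);   // raw bytes, no codec involved
+        }
+
+        public void writeCompressedBytes(DataOutputStream outStream) throws IOException {
+          throw new IllegalArgumentException("value is not stored in compressed form");
+        }
+
+        public int getSize() { return data.length; }
+      }
+ -->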
+ <!-- start class org.apache.hadoop.io.SequenceFile.Writer -->
+ <class name="SequenceFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, int, short, long, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a sync point.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="appendRaw"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keyData" type="byte[]"/>
+ <param name="keyOffset" type="int"/>
+ <param name="keyLength" type="int"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current length of the output file.
+
+ <p>This always returns a synchronized position. In other words,
+ immediately after calling {@link SequenceFile.Reader#seek(long)} with a position
+ returned by this method, {@link SequenceFile.Reader#next(Writable)} may be called. However,
+ the key may be earlier in the file than the key last written when this
+ method was called (e.g., with block-compression, it may be the first key
+ in the block that was being written when this method was called).]]>
+ </doc>
+ </method>
+ <field name="keySerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="uncompressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="compressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Write key/value pairs to a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Writer -->
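+ <!-- Usage sketch: writing a SequenceFile and recording a seekable position with
+      sync() and getLength(), per the getLength contract above. The file name and
+      the Text/IntWritable classes are illustrative assumptions.
+
+      import java.io.IOException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.io.IntWritable;
+      import org.apache.hadoop.io.SequenceFile;
+      import org.apache.hadoop.io.Text;
+
+      public class WriteSeqFile {
+        public static void main(String[] args) throws IOException {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.get(conf);
+          SequenceFile.Writer writer = new SequenceFile.Writer(
+              fs, conf, new Path("data.seq"), Text.class, IntWritable.class);
+          try {
+            for (int i = 0; i < 1000; i++) {
+              writer.append(new Text("key-" + i), new IntWritable(i));
+            }
+            writer.sync();                     // create a sync point
+            long pos = writer.getLength();     // a position Reader.seek(long) accepts
+            System.out.println("safe seek position: " + pos);
+          } finally {
+            writer.close();
+          }
+        }
+      }
+ -->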
+ <!-- start class org.apache.hadoop.io.SetFile -->
+ <class name="SetFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A file-based set of keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile -->
+ <!-- start class org.apache.hadoop.io.SetFile.Reader -->
+ <class name="SetFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set using the named comparator.]]>
+ </doc>
+ </constructor>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in a set into <code>key</code>. Returns
+ true if such a key exists and false when at the end of the set.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the matching key from a set into <code>key</code>.
+ Returns <code>key</code>, or null if no match exists.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Reader -->
+ <!-- start class org.apache.hadoop.io.SetFile.Writer -->
+ <class name="SetFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="pass a Configuration too">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named set for keys of the named class.
+ @deprecated pass a Configuration too]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element class and compression type.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element comparator and compression type.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key to a set. The key must be strictly greater than the
+ previous key added to the set.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Writer -->
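+ <!-- Usage sketch: writing and probing a SetFile. Keys must be appended in strictly
+      increasing order, as noted above. The set name "myset" and the Text element
+      class are illustrative assumptions.
+
+      import java.io.IOException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.io.SequenceFile;
+      import org.apache.hadoop.io.SetFile;
+      import org.apache.hadoop.io.Text;
+
+      public class SetFileDemo {
+        public static void main(String[] args) throws IOException {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.get(conf);
+          SetFile.Writer writer = new SetFile.Writer(conf, fs, "myset",
+              Text.class, SequenceFile.CompressionType.NONE);
+          writer.append(new Text("apple"));    // strictly increasing key order
+          writer.append(new Text("banana"));
+          writer.close();
+
+          SetFile.Reader reader = new SetFile.Reader(fs, "myset", conf);
+          System.out.println(reader.get(new Text("banana")) != null);  // true: member
+          reader.close();
+        }
+      }
+ -->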
+ <!-- start class org.apache.hadoop.io.SortedMapWritable -->
+ <class name="SortedMapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="SortedMapWritable" type="org.apache.hadoop.io.SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="comparator" return="java.util.Comparator&lt;? super org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="firstKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="headMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="lastKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="subMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="tailMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.WritableComparable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable SortedMap.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SortedMapWritable -->
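+ <!-- Usage sketch: basic use of SortedMapWritable as a SortedMap keyed by
+      WritableComparable. The Text/IntWritable entries are illustrative.
+
+      import org.apache.hadoop.io.IntWritable;
+      import org.apache.hadoop.io.SortedMapWritable;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.io.Writable;
+      import org.apache.hadoop.io.WritableComparable;
+
+      public class SortedMapDemo {
+        public static void main(String[] args) {
+          SortedMapWritable map = new SortedMapWritable();
+          map.put(new Text("b"), new IntWritable(2));
+          map.put(new Text("a"), new IntWritable(1));
+          WritableComparable first = map.firstKey();   // "a": entries stay sorted
+          Writable value = map.get(new Text("b"));     // IntWritable(2)
+          System.out.println(first + " / " + value + " / size=" + map.size());
+        }
+      }
+ -->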
+ <!-- start interface org.apache.hadoop.io.Stringifier -->
+ <interface name="Stringifier" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Converts the object to a string representation
+ @param obj the object to convert
+ @return the string representation of the object
+ @throws IOException if the object cannot be converted]]>
+ </doc>
+ </method>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from its string representation.
+ @param str the string representation of the object
+ @return restored object
+ @throws IOException if the object cannot be restored]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes this object.
+ @throws IOException if an I/O error occurs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stringifier interface offers two methods to convert an object
+ to a string representation and restore the object given its
+ string representation.
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Stringifier -->
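+ <!-- Usage sketch: round-tripping a Writable through a Stringifier. This uses
+      DefaultStringifier, the stock implementation in org.apache.hadoop.io, under
+      the assumption that the default serialization configuration is in effect.
+
+      import java.io.IOException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.DefaultStringifier;
+      import org.apache.hadoop.io.IntWritable;
+
+      public class StringifierDemo {
+        public static void main(String[] args) throws IOException {
+          Configuration conf = new Configuration();
+          DefaultStringifier<IntWritable> s =
+              new DefaultStringifier<IntWritable>(conf, IntWritable.class);
+          String repr = s.toString(new IntWritable(42));   // object to string form
+          IntWritable back = s.fromString(repr);           // restore the object
+          s.close();
+          System.out.println(back.get());                  // 42
+        }
+      }
+ -->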
+ <!-- start class org.apache.hadoop.io.Text -->
+ <class name="Text" extends="org.apache.hadoop.io.BinaryComparable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.io.BinaryComparable&gt;"/>
+ <constructor name="Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Text" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a string.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="org.apache.hadoop.io.Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another text.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a byte array.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the raw bytes; however, only data up to {@link #getLength()} is
+ valid.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of bytes in the byte array.]]>
+ </doc>
+ </method>
+ <method name="charAt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="int"/>
+ <doc>
+ <![CDATA[Returns the Unicode Scalar Value (32-bit integer value)
+ for the character at <code>position</code>. Note that this
+ method avoids using the converter or doing String instantiation.
+ @return the Unicode scalar value at position, or -1
+ if the position is invalid or points to a
+ trailing byte]]>
+ </doc>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Finds any occurrence of <code>what</code> in the backing
+ buffer, starting at position <code>start</code>. The starting
+ position is measured in bytes and the return value is in
+ terms of byte position in the buffer. The backing buffer is
+ not converted to a string for this operation.
+ @return byte position of the first occurrence of the search
+ string in the UTF-8 buffer, or -1 if not found]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <doc>
+ <![CDATA[Set to a UTF-8 byte array.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[Copy a text.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Set the Text to a range of bytes.
+ @param utf8 the data to copy from
+ @param start the first position of the new string
+ @param len the number of bytes of the new string]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Append a range of bytes to the end of the given text
+ @param utf8 the data to copy from
+ @param start the first position to append from utf8
+ @param len the number of bytes to append]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clear the string to empty.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert the text back to a String.
+ @see java.lang.Object#toString()]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one Text in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize: write this object to <code>out</code>.
+ The length uses zero-compressed encoding.
+ @see Writable#write(DataOutput)]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a Text with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If the input is malformed, malformed bytes are
+ replaced by a default value.]]>
+ </doc>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If the input is malformed,
+ invalid characters are replaced by a default value.
+ @return ByteBuffer: bytes stored at ByteBuffer.array()
+ and length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.
+ @return ByteBuffer: bytes stored at ByteBuffer.array()
+ and length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string from in.]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF-8 encoded string to out.]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check if a byte array contains valid UTF-8.
+ @param utf8 byte array
+ @throws MalformedInputException if the byte array contains invalid UTF-8]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check to see if a byte array is valid UTF-8.
+ @param utf8 the array of bytes
+ @param start the offset of the first byte in the array
+ @param len the length of the byte sequence
+ @throws MalformedInputException if the byte array contains invalid bytes]]>
+ </doc>
+ </method>
+ <method name="bytesToCodePoint" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="java.nio.ByteBuffer"/>
+ <doc>
+ <![CDATA[Returns the next code point at the current position in
+ the buffer. The buffer's position will be incremented.
+ Any mark set on this buffer will be changed by this method!]]>
+ </doc>
+ </method>
+ <method name="utf8Length" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[For the given string, returns the number of UTF-8 bytes
+ required to encode the string.
+ @param string text to encode
+ @return number of UTF-8 bytes required to encode]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class stores text using standard UTF-8 encoding. It provides methods
+ to serialize, deserialize, and compare texts at byte level. The type of
+ length is integer and is serialized using zero-compressed format. <p>In
+ addition, it provides methods for string traversal without converting the
+ byte array to a string. <p>Also includes utilities for
+ serializing/deserializing a string, coding/decoding a string, checking if a
+ byte array contains valid UTF-8 code, and calculating the length of an encoded
+ string.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text -->
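+ <!-- Usage sketch: byte-level string handling with Text. The sample string is
+      illustrative; note that charAt and find work in byte positions, not char
+      indices, as documented above.
+
+      import java.nio.charset.CharacterCodingException;
+      import org.apache.hadoop.io.Text;
+
+      public class TextDemo {
+        public static void main(String[] args) throws CharacterCodingException {
+          Text t = new Text("héllo");
+          int len = t.getLength();      // byte length of the UTF-8 encoding (6 here)
+          int cp = t.charAt(1);         // Unicode scalar value of 'é' (a 2-byte char)
+          int at = t.find("llo");       // byte offset of the match, or -1
+          String s = Text.decode(t.getBytes(), 0, t.getLength());
+          System.out.println(len + " " + cp + " " + at + " " + s);
+        }
+      }
+ -->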
+ <!-- start class org.apache.hadoop.io.Text.Comparator -->
+ <class name="Text.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Text.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for Text keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text.Comparator -->
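+ <!-- Usage sketch: comparing two Texts in their serialized form, the way shuffle
+      code would. WritableComparator.get(Text.class) returns the registered
+      Text.Comparator; the sample strings are illustrative.
+
+      import java.io.IOException;
+      import org.apache.hadoop.io.DataOutputBuffer;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.io.WritableComparator;
+
+      public class RawCompareDemo {
+        public static void main(String[] args) throws IOException {
+          DataOutputBuffer b1 = new DataOutputBuffer();
+          new Text("apple").write(b1);    // vint length followed by UTF-8 bytes
+          DataOutputBuffer b2 = new DataOutputBuffer();
+          new Text("banana").write(b2);
+          WritableComparator cmp = WritableComparator.get(Text.class);
+          int c = cmp.compare(b1.getData(), 0, b1.getLength(),
+                              b2.getData(), 0, b2.getLength());
+          System.out.println(c < 0);      // true: "apple" sorts before "banana"
+        }
+      }
+ -->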
+ <!-- start class org.apache.hadoop.io.TwoDArrayWritable -->
+ <class name="TwoDArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[][]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[][]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for 2D arrays containing a matrix of instances of a class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.TwoDArrayWritable -->
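+ <!-- Usage sketch: serializing a 2x2 matrix of IntWritables through
+      TwoDArrayWritable and reading it back. The values are illustrative.
+
+      import java.io.IOException;
+      import org.apache.hadoop.io.DataInputBuffer;
+      import org.apache.hadoop.io.DataOutputBuffer;
+      import org.apache.hadoop.io.IntWritable;
+      import org.apache.hadoop.io.TwoDArrayWritable;
+      import org.apache.hadoop.io.Writable;
+
+      public class TwoDArrayDemo {
+        public static void main(String[] args) throws IOException {
+          Writable[][] matrix = {
+            { new IntWritable(1), new IntWritable(2) },
+            { new IntWritable(3), new IntWritable(4) },
+          };
+          TwoDArrayWritable w = new TwoDArrayWritable(IntWritable.class, matrix);
+          DataOutputBuffer out = new DataOutputBuffer();
+          w.write(out);                                   // serialize the matrix
+
+          TwoDArrayWritable r = new TwoDArrayWritable(IntWritable.class);
+          DataInputBuffer in = new DataInputBuffer();
+          in.reset(out.getData(), out.getLength());
+          r.readFields(in);                               // restore it
+          System.out.println(((IntWritable) r.get()[1][0]).get());  // 3
+        }
+      }
+ -->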
+ <!-- start class org.apache.hadoop.io.UTF8 -->
+ <class name="UTF8" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="replaced by Text">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UTF8" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <constructor name="UTF8" type="org.apache.hadoop.io.UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another UTF8.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw bytes.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the encoded string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one UTF8 in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare two UTF8s.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert to a String.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a UTF8 with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convert a string to a UTF-8 encoded byte array.
+ @see String#getBytes(String)]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string.
+
+ @see DataInput#readUTF()]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF-8 encoded string.
+
+ @see DataOutput#writeUTF(String)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for strings that uses the UTF8 encoding.
+
+ <p>Also includes utilities for efficiently reading and writing UTF-8.
+
+ @deprecated replaced by Text]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8 -->
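+  <!-- A minimal usage sketch of the static UTF8 helpers documented above
+  (the class name Utf8RoundTrip is illustrative, not part of Hadoop); since
+  UTF8 is deprecated, Text is the recommended replacement:
+
+  import java.io.ByteArrayInputStream;
+  import java.io.ByteArrayOutputStream;
+  import java.io.DataInputStream;
+  import java.io.DataOutputStream;
+  import java.io.IOException;
+  import org.apache.hadoop.io.UTF8;
+
+  public class Utf8RoundTrip {
+    public static void main(String[] args) throws IOException {
+      ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+      UTF8.writeString(new DataOutputStream(buffer), "hello, hadoop");
+      DataInputStream in =
+          new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
+      System.out.println(UTF8.readString(in));   // prints "hello, hadoop"
+    }
+  }
+  -->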
+ <!-- start class org.apache.hadoop.io.UTF8.Comparator -->
+ <class name="UTF8.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UTF8.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8.Comparator -->
+ <!-- start class org.apache.hadoop.io.VersionedWritable -->
+ <class name="VersionedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="VersionedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="byte"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the version number of the current implementation.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for Writables that provides version checking.
+
+ <p>This is useful when a class may evolve, so that instances written by the
+ old version of the class may still be processed by the new version. To
+ handle this situation, {@link #readFields(DataInput)}
+ implementations should catch {@link VersionMismatchException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionedWritable -->
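+  <!-- A hedged sketch of a VersionedWritable subclass; VersionedRecord and
+  its field are illustrative, not part of Hadoop. Delegating to super first
+  puts the version byte in front of the subclass's own fields:
+
+  import java.io.DataInput;
+  import java.io.DataOutput;
+  import java.io.IOException;
+  import org.apache.hadoop.io.VersionedWritable;
+
+  public class VersionedRecord extends VersionedWritable {
+    private static final byte VERSION = 1;
+    private int counter;
+
+    public byte getVersion() { return VERSION; }
+
+    public void write(DataOutput out) throws IOException {
+      super.write(out);        // writes the version byte first
+      out.writeInt(counter);
+    }
+
+    public void readFields(DataInput in) throws IOException {
+      super.readFields(in);    // throws VersionMismatchException on mismatch
+      counter = in.readInt();
+    }
+  }
+  -->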
+ <!-- start class org.apache.hadoop.io.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="byte, byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Thrown by {@link VersionedWritable#readFields(DataInput)} when the
+ version of an object being read does not match the current implementation
+ version as returned by {@link VersionedWritable#getVersion()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionMismatchException -->
+ <!-- start class org.apache.hadoop.io.VIntWritable -->
+ <class name="VIntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VIntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VIntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VIntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VIntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for integer values stored in variable-length format.
+ Such values take between one and five bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVInt(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VIntWritable -->
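+  <!-- An illustrative sketch of the size behaviour described above (the
+  class name VIntSize is hypothetical): a small value serializes to a
+  single byte:
+
+  import java.io.ByteArrayOutputStream;
+  import java.io.DataOutputStream;
+  import java.io.IOException;
+  import org.apache.hadoop.io.VIntWritable;
+
+  public class VIntSize {
+    public static void main(String[] args) throws IOException {
+      ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+      new VIntWritable(42).write(new DataOutputStream(buffer));
+      System.out.println(buffer.size());   // 1: 42 fits in a single byte
+    }
+  }
+  -->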
+ <!-- start class org.apache.hadoop.io.VLongWritable -->
+ <class name="VLongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VLongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VLongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+      <![CDATA[Set the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VLongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VLongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs in a variable-length format. Such values take
+ between one and nine bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVLong(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VLongWritable -->
+ <!-- start interface org.apache.hadoop.io.Writable -->
+ <interface name="Writable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the fields of this object to <code>out</code>.
+
+ @param out <code>DataOutput</code> to serialize this object into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the fields of this object from <code>in</code>.
+
+ <p>For efficiency, implementations should attempt to re-use storage in the
+ existing object where possible.</p>
+
+ @param in <code>DataInput</code> to deserialize this object from.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A serializable object which implements a simple, efficient, serialization
+ protocol, based on {@link DataInput} and {@link DataOutput}.
+
+ <p>Any <code>key</code> or <code>value</code> type in the Hadoop Map-Reduce
+ framework implements this interface.</p>
+
+ <p>Implementations typically implement a static <code>read(DataInput)</code>
+ method which constructs a new instance, calls {@link #readFields(DataInput)}
+ and returns the instance.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritable implements Writable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public static MyWritable read(DataInput in) throws IOException {
+ MyWritable w = new MyWritable();
+ w.readFields(in);
+ return w;
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Writable -->
+ <!-- start interface org.apache.hadoop.io.WritableComparable -->
+ <interface name="WritableComparable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable&lt;T&gt;"/>
+ <doc>
+ <![CDATA[A {@link Writable} which is also {@link Comparable}.
+
+ <p><code>WritableComparable</code>s can be compared to each other, typically
+ via <code>Comparator</code>s. Any type which is to be used as a
+ <code>key</code> in the Hadoop Map-Reduce framework should implement this
+ interface.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritableComparable implements WritableComparable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public int compareTo(MyWritableComparable w) {
+       int thisValue = this.counter;
+       int thatValue = w.counter;
+ return (thisValue &lt; thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableComparable -->
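+  <!-- A short sketch (the class name CompareKeys is illustrative) of
+  comparing two WritableComparable keys through the comparator registry
+  described under WritableComparator below:
+
+  import org.apache.hadoop.io.IntWritable;
+  import org.apache.hadoop.io.WritableComparator;
+
+  public class CompareKeys {
+    public static void main(String[] args) {
+      WritableComparator comparator = WritableComparator.get(IntWritable.class);
+      int result = comparator.compare(new IntWritable(3), new IntWritable(7));
+      System.out.println(result);   // negative: 3 sorts before 7
+    }
+  }
+  -->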
+ <!-- start class org.apache.hadoop.io.WritableComparator -->
+ <class name="WritableComparator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator"/>
+ <constructor name="WritableComparator" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </constructor>
+ <constructor name="WritableComparator" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"/>
+ <doc>
+ <![CDATA[Get a comparator for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link WritableComparable}
+ implementation.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the WritableComparable implementation class.]]>
+ </doc>
+ </method>
+ <method name="newKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a new {@link WritableComparable} instance.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+      <![CDATA[Optimization hook. Override this to make SequenceFile.Sorters scream.
+
+ <p>The default implementation reads the data into two {@link
+ WritableComparable}s (using {@link
+ Writable#readFields(DataInput)}), then calls {@link
+ #compare(WritableComparable,WritableComparable)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[Compare two WritableComparables.
+
+ <p> The default implementation uses the natural ordering, calling {@link
+ Comparable#compareTo(Object)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <method name="hashBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Compute hash for binary data.]]>
+ </doc>
+ </method>
+ <method name="readUnsignedShort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an unsigned short from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an integer from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a long from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array containing the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator for {@link WritableComparable}s.
+
+ <p>This base implementation uses the natural ordering. To define alternate
+ orderings, override {@link #compare(WritableComparable,WritableComparable)}.
+
+ <p>One may optimize compare-intensive operations by overriding
+ {@link #compare(byte[],int,int,byte[],int,int)}. Static utility methods are
+ provided to assist in optimized implementations of this method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableComparator -->
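+  <!-- A hedged sketch of the raw-comparison optimization hook described
+  above. KeyComparator is illustrative: it mirrors the pattern IntWritable's
+  own comparator uses, comparing the serialized 4-byte ints without
+  deserializing them; registering it for IntWritable here is purely for
+  demonstration.
+
+  import org.apache.hadoop.io.IntWritable;
+  import org.apache.hadoop.io.WritableComparator;
+
+  public class KeyComparator extends WritableComparator {
+    public KeyComparator() {
+      super(IntWritable.class);
+    }
+
+    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
+      int thisValue = readInt(b1, s1);   // an IntWritable is 4 big-endian bytes
+      int thatValue = readInt(b2, s2);
+      return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
+    }
+
+    static {
+      // make the optimized comparator discoverable via WritableComparator.get
+      WritableComparator.define(IntWritable.class, new KeyComparator());
+    }
+  }
+  -->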
+ <!-- start class org.apache.hadoop.io.WritableFactories -->
+ <class name="WritableFactories" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="factory" type="org.apache.hadoop.io.WritableFactory"/>
+ <doc>
+ <![CDATA[Define a factory for a class.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.io.WritableFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+      <![CDATA[Get the factory defined for a class.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[Factories for non-public writables. Defining a factory permits {@link
+ ObjectWritable} to construct instances of non-public classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableFactories -->
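+  <!-- An illustrative sketch of registering a factory for a (hypothetical)
+  non-public Writable so it can be constructed reflectively, as the doc
+  above describes for ObjectWritable:
+
+  import java.io.DataInput;
+  import java.io.DataOutput;
+  import java.io.IOException;
+  import org.apache.hadoop.io.Writable;
+  import org.apache.hadoop.io.WritableFactories;
+  import org.apache.hadoop.io.WritableFactory;
+
+  class InternalRecord implements Writable {   // note: not public
+    private long stamp;
+    public void write(DataOutput out) throws IOException { out.writeLong(stamp); }
+    public void readFields(DataInput in) throws IOException { stamp = in.readLong(); }
+  }
+
+  public class RegisterFactory {
+    public static void main(String[] args) {
+      WritableFactories.setFactory(InternalRecord.class, new WritableFactory() {
+        public Writable newInstance() { return new InternalRecord(); }
+      });
+      System.out.println(WritableFactories.newInstance(InternalRecord.class));
+    }
+  }
+  -->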
+ <!-- start interface org.apache.hadoop.io.WritableFactory -->
+ <interface name="WritableFactory" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a new instance.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A factory for a class of Writable.
+ @see WritableFactories]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableFactory -->
+ <!-- start class org.apache.hadoop.io.WritableName -->
+ <class name="WritableName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the name that a class should be known as to something other than the
+ class name.]]>
+ </doc>
+ </method>
+ <method name="addName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add an alternate name for a class.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Return the name for a class. Default is {@link Class#getName()}.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the class for a name. Default is {@link Class#forName(String)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility to permit renaming of Writable implementation classes without
+ invalidating files that contain their class name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableName -->
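+  <!-- A minimal sketch of recording an old class name after a rename; the
+  alias string is hypothetical. Files that stored the old name keep
+  resolving to the renamed class:
+
+  import org.apache.hadoop.io.IntWritable;
+  import org.apache.hadoop.io.WritableName;
+
+  public class NameAliases {
+    public static void main(String[] args) {
+      // pretend IntWritable was once called com.example.LegacyIntWritable
+      WritableName.addName(IntWritable.class, "com.example.LegacyIntWritable");
+      System.out.println(WritableName.getName(IntWritable.class));
+    }
+  }
+  -->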
+ <!-- start class org.apache.hadoop.io.WritableUtils -->
+ <class name="WritableUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="WritableUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readCompressedByteArray" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skipCompressedByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedByteArray" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="bytes" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="displayByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="record" type="byte[]"/>
+ </method>
+ <method name="clone" return="T extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="orig" type="T extends org.apache.hadoop.io.Writable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Make a copy of a writable object using serialization to a buffer.
+ @param orig The object to copy
+ @return The copied object]]>
+ </doc>
+ </method>
+ <method name="cloneInto"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.io.Writable"/>
+ <param name="src" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Make a copy of the writable object using serialization to a buffer
+ @param dst the object to copy into; its previous contents are destroyed
+ @param src the object to copy from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Serializes an integer to a binary stream with zero-compressed encoding,
+ using the same format as writeVLong.
+ For -112 <= i <= 127, only one byte is used, holding the actual value.
+ For other values of i, the first byte indicates whether the
+ integer is positive or negative, and how many bytes follow.
+ If the first byte value v is between -113 and -120, the following integer
+ is positive and -(v+112) bytes follow.
+ If the first byte value v is between -121 and -128, the following integer
+ is negative and -(v+120) bytes follow. Bytes are
+ stored in high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Integer to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used, holding the actual value.
+ For other values of i, the first byte indicates whether the
+ long is positive or negative, and how many bytes follow.
+ If the first byte value v is between -113 and -120, the following long
+ is positive and -(v+112) bytes follow.
+ If the first byte value v is between -121 and -128, the following long
+ is negative and -(v+120) bytes follow. Bytes are
+ stored in high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized long from stream.]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized integer from stream.]]>
+ </doc>
+ </method>
+ <method name="isNegativeVInt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+      <![CDATA[Given the first byte of a vint/vlong, determine the sign.
+ @param value the first byte
+ @return true if the encoded value is negative]]>
+ </doc>
+ </method>
+ <method name="decodeVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Parse the first byte of a vint/vlong to determine the number of bytes
+ @param value the first byte of the vint/vlong
+ @return the total number of bytes (1 to 9)]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+      <![CDATA[Get the encoded length of an integer stored in variable-length format
+ @return the encoded length]]>
+ </doc>
+ </method>
+ <method name="readEnum" return="T extends java.lang.Enum&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="enumType" type="java.lang.Class&lt;T&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read an Enum value from DataInput. Enums are read and written
+ using their String values.
+ @param <T> Enum type
+ @param in DataInput to read from
+ @param enumType Class type of Enum
+ @return Enum represented by String read from DataInput
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeEnum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="enumVal" type="java.lang.Enum&lt;?&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Writes the String value of an enum to DataOutput.
+ @param out DataOutput stream
+ @param enumVal enum value
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Skip <i>len</i> bytes in the input stream <i>in</i>.
+ @param in input stream
+ @param len number of bytes to skip
+ @throws IOException if fewer than <i>len</i> bytes could be skipped]]>
+ </doc>
+ </method>
+ <method name="toByteArray" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writables" type="org.apache.hadoop.io.Writable[]"/>
+ <doc>
+ <![CDATA[Convert writables to a byte array]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableUtils -->
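+  <!-- An illustrative sketch of the zero-compressed encoding documented
+  above: small longs cost one byte, the largest nine. The class name
+  VLongSizes is hypothetical.
+
+  import java.io.ByteArrayOutputStream;
+  import java.io.DataOutputStream;
+  import java.io.IOException;
+  import org.apache.hadoop.io.WritableUtils;
+
+  public class VLongSizes {
+    public static void main(String[] args) throws IOException {
+      for (long v : new long[] { 127L, 128L, Long.MAX_VALUE }) {
+        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+        WritableUtils.writeVLong(new DataOutputStream(buffer), v);
+        // getVIntSize predicts the same length without serializing
+        System.out.println(v + " encodes to " + buffer.size() + " byte(s), "
+            + "predicted " + WritableUtils.getVIntSize(v));
+      }
+    }
+  }
+  -->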
+</package>
+<package name="org.apache.hadoop.io.compress">
+ <!-- start class org.apache.hadoop.io.compress.BZip2Codec -->
+ <class name="BZip2Codec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="BZip2Codec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BZip2Codec]]>
+ </doc>
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates CompressionOutputStream for BZip2
+
+ @param out
+ The output Stream
+ @return The BZip2 CompressionOutputStream
+ @throws java.io.IOException
+ Throws IO exception]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Creates a CompressionInputStream from which uncompressed data can be read.
+
+ @param in
+ The InputStream of compressed data
+ @return a CompressionInputStream for BZip2
+ @throws java.io.IOException
+ Throws IOException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the default extension for BZip2-compressed files.
+
+ @return the default bzip2 file extension, ".bz2"]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[This class provides CompressionOutputStream and CompressionInputStream for
+ compression and decompression. It currently has no implementation of
+ the Compressor and Decompressor interfaces, so those methods of
+ CompressionCodec that take a Compressor or Decompressor argument throw
+ UnsupportedOperationException.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.BZip2Codec -->
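+  <!-- A hedged sketch of writing a .bz2 file through this codec; per the
+  doc above, only the overloads without a Compressor/Decompressor are
+  supported. The file name and class name are illustrative.
+
+  import java.io.FileOutputStream;
+  import java.io.IOException;
+  import org.apache.hadoop.io.compress.BZip2Codec;
+  import org.apache.hadoop.io.compress.CompressionOutputStream;
+
+  public class WriteBz2 {
+    public static void main(String[] args) throws IOException {
+      BZip2Codec codec = new BZip2Codec();
+      CompressionOutputStream out = codec.createOutputStream(
+          new FileOutputStream("data" + codec.getDefaultExtension()));
+      out.write("hello bzip2".getBytes("UTF-8"));
+      out.close();   // finishes the compressed stream and closes the file
+    }
+  }
+  -->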
+ <!-- start class org.apache.hadoop.io.compress.CodecPool -->
+ <class name="CodecPool" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CodecPool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Compressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Compressor</code>
+ @return <code>Compressor</code> for the given
+ <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="getDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Decompressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Decompressor</code>
+ @return <code>Decompressor</code> for the given
+ <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="returnCompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <doc>
+ <![CDATA[Return the {@link Compressor} to the pool.
+
+ @param compressor the <code>Compressor</code> to be returned to the pool]]>
+ </doc>
+ </method>
+ <method name="returnDecompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <doc>
+ <![CDATA[Return the {@link Decompressor} to the pool.
+
+ @param decompressor the <code>Decompressor</code> to be returned to the
+ pool]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A global compressor/decompressor pool used to save and reuse
+ (possibly native) compression/decompression codecs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CodecPool -->
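+  <!-- A sketch of the intended borrow/return discipline for the pool;
+  returning in a finally block keeps the pool healthy on error paths. It
+  assumes a codec whose compressors can be pooled, e.g. DefaultCodec; the
+  class and method names are illustrative.
+
+  import java.io.IOException;
+  import java.io.OutputStream;
+  import org.apache.hadoop.io.compress.CodecPool;
+  import org.apache.hadoop.io.compress.CompressionCodec;
+  import org.apache.hadoop.io.compress.CompressionOutputStream;
+  import org.apache.hadoop.io.compress.Compressor;
+
+  public class PooledWrite {
+    public static void writeCompressed(CompressionCodec codec, OutputStream raw,
+                                       byte[] data) throws IOException {
+      Compressor compressor = CodecPool.getCompressor(codec);
+      try {
+        CompressionOutputStream out = codec.createOutputStream(raw, compressor);
+        out.write(data);
+        out.finish();
+      } finally {
+        CodecPool.returnCompressor(compressor);
+      }
+    }
+  }
+  -->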
+ <!-- start interface org.apache.hadoop.io.compress.CompressionCodec -->
+ <interface name="CompressionCodec" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream}.
+
+ @param out the location for the final output stream
+ @return a stream the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream} with the given {@link Compressor}.
+
+ @param out the location for the final output stream
+ @param compressor compressor to use
+ @return a stream the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
+
+ @return the type of compressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Compressor} for use by this {@link CompressionCodec}.
+
+ @return a new compressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a stream decompressor that will read from the given input stream.
+
+ @param in the stream to read compressed bytes from
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionInputStream} that will read from the given
+ {@link InputStream} with the given {@link Decompressor}.
+
+ @param in the stream to read compressed bytes from
+ @param decompressor decompressor to use
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
+
+ @return the type of decompressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
+
+ @return a new decompressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a streaming compression/decompression pair.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.CompressionCodec -->
+ <!-- start class org.apache.hadoop.io.compress.CompressionCodecFactory -->
+ <class name="CompressionCodecFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionCodecFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Find the codecs specified in the config value io.compression.codecs
+ and register them. Defaults to gzip and deflate.]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print the extension map out as a string.]]>
+ </doc>
+ </method>
+ <method name="getCodecClasses" return="java.util.List&lt;java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the list of codecs listed in the configuration
+ @param conf the configuration to look in
+ @return a list of the codec classes or null if the attribute
+ was not set]]>
+ </doc>
+ </method>
+ <method name="setCodecClasses"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="classes" type="java.util.List&lt;java.lang.Class&gt;"/>
+ <doc>
+ <![CDATA[Sets a list of codec classes in the configuration.
+ @param conf the configuration to modify
+ @param classes the list of classes to set]]>
+ </doc>
+ </method>
+ <method name="getCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Find the relevant compression codec for the given file based on its
+ filename suffix.
+ @param file the filename to check
+ @return the codec object]]>
+ </doc>
+ </method>
+ <method name="removeSuffix" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Removes a suffix from a filename, if present.
+ @param filename the filename to strip
+ @param suffix the suffix to remove
+ @return the shortened filename]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[A little test program.
+ @param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A factory that will find the correct codec for a given filename.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionCodecFactory -->
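+  <!-- An illustrative sketch: pick a codec from a file's suffix and read
+  the file back as text. The path is hypothetical; a null codec means the
+  suffix was not recognized and the bytes are read as-is.
+
+  import java.io.BufferedReader;
+  import java.io.IOException;
+  import java.io.InputStream;
+  import java.io.InputStreamReader;
+  import org.apache.hadoop.conf.Configuration;
+  import org.apache.hadoop.fs.FileSystem;
+  import org.apache.hadoop.fs.Path;
+  import org.apache.hadoop.io.compress.CompressionCodec;
+  import org.apache.hadoop.io.compress.CompressionCodecFactory;
+
+  public class ReadAnyCompressed {
+    public static void main(String[] args) throws IOException {
+      Configuration conf = new Configuration();
+      Path file = new Path("input/part-00000.gz");
+      CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(file);
+      FileSystem fs = FileSystem.get(conf);
+      InputStream in = (codec == null)
+          ? fs.open(file)                            // unknown suffix: raw bytes
+          : codec.createInputStream(fs.open(file));  // decompress on the fly
+      BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+      System.out.println(reader.readLine());
+      reader.close();
+    }
+  }
+  -->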
+ <!-- start class org.apache.hadoop.io.compress.CompressionInputStream -->
+ <class name="CompressionInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a compression input stream that reads
+ the decompressed bytes from the given stream.
+
+ @param in The compressed input stream to read from.]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read bytes from the stream.
+ Made abstract to prevent leakage to the underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the decompressor to its initial state and discard any buffered data,
+ as the underlying stream may have been repositioned.]]>
+ </doc>
+ </method>
+ <field name="in" type="java.io.InputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The underlying compressed input stream.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression input stream.
+
+ <p>Implementations are assumed to be buffered. This permits clients to
+ reposition the underlying input stream then call {@link #resetState()},
+ without having to also synchronize client buffers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionInputStream -->
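+ <!-- Illustrative usage note (editor's sketch): because implementations are
+ buffered, a client may reposition the underlying stream and then call
+ resetState(). The names fs, path, codec and blockStart below are
+ hypothetical.
+
+ FSDataInputStream raw = fs.open(path); // seekable underlying stream
+ CompressionInputStream cin = codec.createInputStream(raw);
+ // ... read for a while ...
+ raw.seek(blockStart); // reposition the underlying stream
+ cin.resetState(); // discard buffered data, reset the decompressor
+ -->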
+ <!-- start class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <class name="CompressionOutputStream" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a compression output stream that writes
+ the compressed bytes to the given stream.
+ @param out the underlying output stream to receive the compressed bytes]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to the stream.
+ Made abstract to prevent leakage to the underlying stream.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finishes writing compressed data to the output stream
+ without closing the underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the compression to the initial state.
+ Does not reset the underlying stream.]]>
+ </doc>
+ </method>
+ <field name="out" type="java.io.OutputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The underlying output stream that receives the compressed bytes.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression output stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionOutputStream -->
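+ <!-- Illustrative usage note (editor's sketch): finish() writes any trailer
+ without closing the underlying stream, which is useful when several
+ compressed segments share one stream. rawOut and data are hypothetical.
+
+ CompressionOutputStream cout = codec.createOutputStream(rawOut);
+ cout.write(data, 0, data.length);
+ cout.finish(); // flush the compressed trailer, keep rawOut open
+ cout.close();
+ -->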
+ <!-- start interface org.apache.hadoop.io.compress.Compressor -->
+ <interface name="Compressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for compression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets preset dictionary for compression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of uncompressed bytes input so far.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of compressed bytes output so far.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[When called, indicates that compression should end
+ with the current contents of the input buffer.]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the compressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with compressed data. Returns actual number
+ of bytes of compressed data. A return value of 0 indicates that
+ needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the compressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of compressed data.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets compressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the compressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'compressor' which can be
+ plugged into a {@link CompressionOutputStream} to compress data.
+ This is modelled after {@link java.util.zip.Deflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Compressor -->
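+ <!-- Illustrative usage note (editor's sketch): the typical Deflater-style
+ drive loop for a Compressor, assuming hypothetical names data and out.
+
+ byte[] buf = new byte[64 * 1024];
+ compressor.setInput(data, 0, data.length);
+ compressor.finish(); // no further input will be provided
+ while (!compressor.finished()) {
+ int n = compressor.compress(buf, 0, buf.length);
+ out.write(buf, 0, n);
+ }
+ -->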
+ <!-- start interface org.apache.hadoop.io.compress.Decompressor -->
+ <interface name="Decompressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for decompression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets preset dictionary for decompression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns <code>true</code> if a preset dictionary is needed for decompression.
+ @return <code>true</code> if a preset dictionary is needed for decompression]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the decompressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the decompressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with uncompressed data. Returns actual number
+ of bytes of uncompressed data. A return value of 0 indicates that
+ #needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the uncompressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of uncompressed data.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets decompressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the decompressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'de-compressor' which can be
+ plugged into a {@link CompressionInputStream} to decompress data.
+ This is modelled after {@link java.util.zip.Inflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Decompressor -->
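+ <!-- Illustrative usage note (editor's sketch): the matching Inflater-style
+ loop for a Decompressor, assuming hypothetical streams in and out.
+
+ byte[] chunk = new byte[8 * 1024];
+ byte[] buf = new byte[64 * 1024];
+ while (!decompressor.finished()) {
+ if (decompressor.needsInput()) {
+ int n = in.read(chunk);
+ if (n < 0) break; // truncated input
+ decompressor.setInput(chunk, 0, n);
+ }
+ int m = decompressor.decompress(buf, 0, buf.length);
+ out.write(buf, 0, m);
+ }
+ -->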
+ <!-- start class org.apache.hadoop.io.compress.DefaultCodec -->
+ <class name="DefaultCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="DefaultCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.DefaultCodec -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec -->
+ <class name="GzipCodec" extends="org.apache.hadoop.io.compress.DefaultCodec"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class creates gzip compressors/decompressors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec -->
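+ <!-- Illustrative usage note (editor's sketch): registering codec classes
+ through the io.compression.codecs key that CompressionCodecFactory reads;
+ the exact value shown is an assumption, not a shipped default.
+
+ Configuration conf = new Configuration();
+ conf.set("io.compression.codecs",
+ "org.apache.hadoop.io.compress.GzipCodec," +
+ "org.apache.hadoop.io.compress.DefaultCodec");
+ CompressionCodecFactory factory = new CompressionCodecFactory(conf);
+ -->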
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <class name="GzipCodec.GzipInputStream" extends="org.apache.hadoop.io.compress.DecompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipInputStream" type="org.apache.hadoop.io.compress.DecompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow subclasses to directly set the inflater stream.]]>
+ </doc>
+ </constructor>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <class name="GzipCodec.GzipOutputStream" extends="org.apache.hadoop.io.compress.CompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipOutputStream" type="org.apache.hadoop.io.compress.CompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow subclasses to supply a different stream type here.
+ @param out the Deflater stream to use]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A bridge that wraps around a DeflaterOutputStream to make it
+ a CompressionOutputStream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <!-- start class org.apache.hadoop.io.compress.LzoCodec -->
+ <class name="LzoCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="LzoCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if native-lzo library is loaded & initialized.
+
+ @param conf configuration
+ @return <code>true</code> if native-lzo library is loaded & initialized;
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link org.apache.hadoop.io.compress.CompressionCodec} for a streaming
+ <b>lzo</b> compression/decompression pair.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzoCodec -->
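+ <!-- Illustrative usage note (editor's sketch): guarding LZO use on the
+ native library, since LzoCodec requires native-lzo. rawOut is
+ hypothetical.
+
+ Configuration conf = new Configuration();
+ if (LzoCodec.isNativeLzoLoaded(conf)) {
+ LzoCodec codec = new LzoCodec();
+ codec.setConf(conf);
+ CompressionOutputStream cout = codec.createOutputStream(rawOut);
+ }
+ -->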
+ <!-- start class org.apache.hadoop.io.compress.LzopCodec -->
+ <class name="LzopCodec" extends="org.apache.hadoop.io.compress.LzoCodec"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LzopCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link org.apache.hadoop.io.compress.CompressionCodec} for a streaming
+ <b>lzo</b> compression/decompression pair compatible with lzop.
+ http://www.lzop.org/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzopCodec -->
+ <!-- start class org.apache.hadoop.io.compress.LzopCodec.LzopDecompressor -->
+ <class name="LzopCodec.LzopDecompressor" extends="org.apache.hadoop.io.compress.lzo.LzoDecompressor"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="LzopCodec.LzopDecompressor" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an LzoDecompressor with LZO1X strategy (the only lzo algorithm
+ supported by lzop).]]>
+ </doc>
+ </constructor>
+ <method name="initHeaderFlags"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dflags" type="java.util.EnumSet&lt;org.apache.hadoop.io.compress.LzopCodec.DChecksum&gt;"/>
+ <param name="cflags" type="java.util.EnumSet&lt;org.apache.hadoop.io.compress.LzopCodec.CChecksum&gt;"/>
+ <doc>
+ <![CDATA[Given a set of decompressed and compressed checksums, initialize
+ the checksums this decompressor will compute and verify.]]>
+ </doc>
+ </method>
+ <method name="resetChecksum"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all checksums registered for this decompressor instance.]]>
+ </doc>
+ </method>
+ <method name="verifyDChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="typ" type="org.apache.hadoop.io.compress.LzopCodec.DChecksum"/>
+ <param name="checksum" type="int"/>
+ <doc>
+ <![CDATA[Given a checksum type, verify its value against that observed in
+ decompressed data.]]>
+ </doc>
+ </method>
+ <method name="verifyCChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="typ" type="org.apache.hadoop.io.compress.LzopCodec.CChecksum"/>
+ <param name="checksum" type="int"/>
+ <doc>
+ <![CDATA[Given a checksum type, verify its value against that observed in
+ compressed data.]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzopCodec.LzopDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.LzopCodec.LzopInputStream -->
+ <class name="LzopCodec.LzopInputStream" extends="org.apache.hadoop.io.compress.BlockDecompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="LzopCodec.LzopInputStream" type="java.io.InputStream, org.apache.hadoop.io.compress.Decompressor, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="readHeader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read and verify an lzo header, setting relevant block checksum
+ options and ignoring almost everything else.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzopCodec.LzopInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.LzopCodec.LzopOutputStream -->
+ <class name="LzopCodec.LzopOutputStream" extends="org.apache.hadoop.io.compress.BlockCompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="LzopCodec.LzopOutputStream" type="java.io.OutputStream, org.apache.hadoop.io.compress.Compressor, int, org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="writeLzopHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="strategy" type="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write an lzop-compatible header to the OutputStream provided.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the underlying stream and write a null word to the output stream.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzopCodec.LzopOutputStream -->
+</package>
+<package name="org.apache.hadoop.io.compress.bzip2">
+ <!-- start interface org.apache.hadoop.io.compress.bzip2.BZip2Constants -->
+ <interface name="BZip2Constants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="baseBlockSize" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_ALPHA_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_CODE_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNA" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNB" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="N_GROUPS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="G_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="N_ITERS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_SELECTORS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NUM_OVERSHOOT_BYTES" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rNums" type="int[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This array really shouldn't be here. Again, for historical purposes it
+ is.
+
+ <p>
+ FIXME: This array should be in a private or package private location,
+ since it could be modified by malicious code.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Base interface for both the compress and decompress classes. Holds
+ common arrays and static data.
+ <p>
+ This interface is public for historical purposes. You should have no need to
+ use it.
+ </p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.bzip2.BZip2Constants -->
+ <!-- start class org.apache.hadoop.io.compress.bzip2.CBZip2InputStream -->
+ <class name="CBZip2InputStream" extends="java.io.InputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.bzip2.BZip2Constants"/>
+ <constructor name="CBZip2InputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a new CBZip2InputStream which decompresses bytes read from the
+ specified stream.
+
+ <p>
+ Although BZip2 headers are marked with the magic <tt>"BZ"</tt> this
+ constructor expects the next byte in the stream to be the first one after
+ the magic. Thus callers have to skip the first two bytes. Otherwise this
+ constructor will throw an exception.
+ </p>
+
+ @throws IOException
+ if the stream content is malformed or an I/O error occurs.
+ @throws NullPointerException
+ if <tt>in == null</tt>]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dest" type="byte[]"/>
+ <param name="offs" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An input stream that decompresses from the BZip2 format (without the file
+ header chars) to be read as any other stream.
+
+ <p>
+ The decompression requires large amounts of memory. Thus you should call the
+ {@link #close() close()} method as soon as possible, to force
+ <tt>CBZip2InputStream</tt> to release the allocated memory. See
+ {@link CBZip2OutputStream CBZip2OutputStream} for information about memory
+ usage.
+ </p>
+
+ <p>
+ <tt>CBZip2InputStream</tt> reads bytes from the compressed source stream via
+ the single byte {@link java.io.InputStream#read() read()} method exclusively.
+ Thus you should consider using a buffered source stream.
+ </p>
+
+ <p>
+ Instances of this class are not threadsafe.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.bzip2.CBZip2InputStream -->
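+ <!-- Illustrative usage note (editor's sketch): the constructor expects the
+ stream to be positioned just past the two magic bytes "BZ", so a caller
+ reads (or skips) them first; data.bz2 is a hypothetical file.
+
+ InputStream raw =
+ new BufferedInputStream(new FileInputStream("data.bz2"));
+ raw.read(); // 'B'
+ raw.read(); // 'Z'
+ CBZip2InputStream bzIn = new CBZip2InputStream(raw);
+ -->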
+ <!-- start class org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream -->
+ <class name="CBZip2OutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.bzip2.BZip2Constants"/>
+ <constructor name="CBZip2OutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a new <tt>CBZip2OutputStream</tt> with a blocksize of 900k.
+
+ <p>
+ <b>Attention: </b>The caller is responsible for writing the two BZip2 magic
+ bytes <tt>"BZ"</tt> to the specified stream prior to calling this
+ constructor.
+ </p>
+
+ @param out
+ the destination stream.
+
+ @throws IOException
+ if an I/O error occurs in the specified stream.
+ @throws NullPointerException
+ if <code>out == null</code>.]]>
+ </doc>
+ </constructor>
+ <constructor name="CBZip2OutputStream" type="java.io.OutputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a new <tt>CBZip2OutputStream</tt> with specified blocksize.
+
+ <p>
+ <b>Attention: </b>The caller is responsible for writing the two BZip2 magic
+ bytes <tt>"BZ"</tt> to the specified stream prior to calling this
+ constructor.
+ </p>
+
+
+ @param out
+ the destination stream.
+ @param blockSize
+ the blockSize in 100k units.
+
+ @throws IOException
+ if an I/O error occurs in the specified stream.
+ @throws IllegalArgumentException
+ if <code>(blockSize < 1) || (blockSize > 9)</code>.
+ @throws NullPointerException
+ if <code>out == null</code>.
+
+ @see #MIN_BLOCKSIZE
+ @see #MAX_BLOCKSIZE]]>
+ </doc>
+ </constructor>
+ <method name="hbMakeCodeLengths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="len" type="char[]"/>
+ <param name="freq" type="int[]"/>
+ <param name="alphaSize" type="int"/>
+ <param name="maxLen" type="int"/>
+ <doc>
+ <![CDATA[This method is accessible by subclasses for historical purposes. If you
+ don't know what it does then you don't need it.]]>
+ </doc>
+ </method>
+ <method name="chooseBlockSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputLength" type="long"/>
+ <doc>
+ <![CDATA[Chooses a blocksize based on the given length of the data to compress.
+
+ @param inputLength
+ The length of the data which will be compressed by
+ <tt>CBZip2OutputStream</tt>.
+
+ @return The blocksize, between {@link #MIN_BLOCKSIZE} and
+ {@link #MAX_BLOCKSIZE} both inclusive. For a negative
+ <tt>inputLength</tt> this method always returns
+ <tt>MAX_BLOCKSIZE</tt>.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Overridden to close the stream.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBlockSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the blocksize parameter specified at construction time.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offs" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="MIN_BLOCKSIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The minimum supported blocksize <tt> == 1</tt>.]]>
+ </doc>
+ </field>
+ <field name="MAX_BLOCKSIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The maximum supported blocksize <tt> == 9</tt>.]]>
+ </doc>
+ </field>
+ <field name="SETMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="CLEARMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="GREATER_ICOST" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="LESSER_ICOST" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="SMALL_THRESH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="DEPTH_THRESH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="WORK_FACTOR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="QSORT_STACK_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.
+ <p>
+ If you are ever unlucky/improbable enough to get a stack overflow whilst
+ sorting, increase the following constant and try again. In practice I
+ have never seen the stack go above 27 elems, so the following limit seems
+ very generous.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An output stream that compresses into the BZip2 format (without the file
+ header chars) into another stream.
+
+ <p>
+ The compression requires large amounts of memory. Thus you should call the
+ {@link #close() close()} method as soon as possible, to force
+ <tt>CBZip2OutputStream</tt> to release the allocated memory.
+ </p>
+
+ <p>
+ You can shrink the amount of allocated memory, and possibly raise the
+ compression speed, by choosing a lower blocksize, which in turn may cause a
+ lower compression ratio. You can avoid unnecessary memory allocation by not
+ using a blocksize which is bigger than the size of the input.
+ </p>
+
+ <p>
+ You can compute the memory usage for compressing by the following formula:
+ </p>
+
+ <pre>
+ &lt;code&gt;400k + (9 * blocksize)&lt;/code&gt;.
+ </pre>
+
+ <p>
+ To get the memory required for decompression by {@link CBZip2InputStream
+ CBZip2InputStream} use
+ </p>
+
+ <pre>
+ &lt;code&gt;65k + (5 * blocksize)&lt;/code&gt;.
+ </pre>
+
+ <table width="100%" border="1">
+ <colgroup> <col width="33%" /> <col width="33%" /> <col width="33%" />
+ </colgroup>
+ <tr>
+ <th colspan="3">Memory usage by blocksize</th>
+ </tr>
+ <tr>
+ <th align="right">Blocksize</th> <th align="right">Compression<br>
+ memory usage</th> <th align="right">Decompression<br>
+ memory usage</th>
+ </tr>
+ <tr>
+ <td align="right">100k</td>
+ <td align="right">1300k</td>
+ <td align="right">565k</td>
+ </tr>
+ <tr>
+ <td align="right">200k</td>
+ <td align="right">2200k</td>
+ <td align="right">1065k</td>
+ </tr>
+ <tr>
+ <td align="right">300k</td>
+ <td align="right">3100k</td>
+ <td align="right">1565k</td>
+ </tr>
+ <tr>
+ <td align="right">400k</td>
+ <td align="right">4000k</td>
+ <td align="right">2065k</td>
+ </tr>
+ <tr>
+ <td align="right">500k</td>
+ <td align="right">4900k</td>
+ <td align="right">2565k</td>
+ </tr>
+ <tr>
+ <td align="right">600k</td>
+ <td align="right">5800k</td>
+ <td align="right">3065k</td>
+ </tr>
+ <tr>
+ <td align="right">700k</td>
+ <td align="right">6700k</td>
+ <td align="right">3565k</td>
+ </tr>
+ <tr>
+ <td align="right">800k</td>
+ <td align="right">7600k</td>
+ <td align="right">4065k</td>
+ </tr>
+ <tr>
+ <td align="right">900k</td>
+ <td align="right">8500k</td>
+ <td align="right">4565k</td>
+ </tr>
+ </table>
+
+ <p>
+ For decompression <tt>CBZip2InputStream</tt> allocates less memory if the
+ bzipped input is smaller than one block.
+ </p>
+
+ <p>
+ Instances of this class are not threadsafe.
+ </p>
+
+ <p>
+ TODO: Update to BZip2 1.0.1
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream -->
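+ <!-- Illustrative usage note (editor's sketch): the caller writes the "BZ"
+ magic itself before constructing the stream; data.bz2 and data are
+ hypothetical.
+
+ OutputStream raw = new FileOutputStream("data.bz2");
+ raw.write('B'); // the two magic bytes the class does not write
+ raw.write('Z');
+ CBZip2OutputStream bzOut = new CBZip2OutputStream(raw, 9); // 900k blocks
+ bzOut.write(data, 0, data.length);
+ bzOut.close();
+ -->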
+</package>
+<package name="org.apache.hadoop.io.compress.lzo">
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
+ <class name="LzoCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="LzoCompressor" type="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified {@link CompressionStrategy}.
+
+ @param strategy lzo compression algorithm to use
+ @param directBufferSize size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default lzo1x_1 compression.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo compressors are loaded and initialized.
+
+ @return <code>true</code> if lzo compressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of bytes given to this compressor since last reset.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of bytes consumed by callers of compress since last reset.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Noop.]]>
+ </doc>
+ </method>
+ <field name="LZO_LIBRARY_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
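+ <!-- Usage sketch (illustrative): driving LzoCompressor through the
+      Compressor interface it implements. Assumes the native lzo library is
+      loaded, i.e. isNativeLzoLoaded() returns true; 'sink' is a
+      hypothetical OutputStream and compress() may throw IOException.
+
+        Compressor compressor = new LzoCompressor();   // default lzo1x_1 strategy
+        compressor.setInput(data, 0, data.length);     // hand over the input
+        compressor.finish();                           // no more input will follow
+        byte[] buf = new byte[64 * 1024];
+        while (!compressor.finished()) {
+          int n = compressor.compress(buf, 0, buf.length);
+          sink.write(buf, 0, n);
+        }
+        compressor.end();                              // a no-op for this class
+ -->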
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <class name="LzoCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression algorithm for lzo library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
+ <class name="LzoDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="LzoDecompressor" type="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.
+
+ @param strategy lzo decompression algorithm
+ @param directBufferSize size of the direct-buffer]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo decompressors are loaded and initialized.
+
+ @return <code>true</code> if lzo decompressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <field name="LZO_LIBRARY_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
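+ <!-- Usage sketch (illustrative): the mirror-image loop on the
+      Decompressor side. Assumes 'compressed' holds one complete buffer
+      produced with a matching LzoCompressor strategy and that the native
+      library is loaded; 'sink' is again a hypothetical OutputStream.
+
+        Decompressor decompressor = new LzoDecompressor();
+        decompressor.setInput(compressed, 0, compressed.length);
+        byte[] buf = new byte[64 * 1024];
+        while (!decompressor.finished()) {
+          int n = decompressor.decompress(buf, 0, buf.length);
+          sink.write(buf, 0, n);
+        }
+        decompressor.end();
+ -->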
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+ <class name="LzoDecompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+</package>
+<package name="org.apache.hadoop.io.compress.zlib">
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <class name="BuiltInZlibDeflater" extends="java.util.zip.Deflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="BuiltInZlibDeflater" type="int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Deflater to make it conform
+ to the org.apache.hadoop.io.compress.Compressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <class name="BuiltInZlibInflater" extends="java.util.zip.Inflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="BuiltInZlibInflater" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibInflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Inflater to make it conform
+ to the org.apache.hadoop.io.compress.Decompressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
+ <class name="ZlibCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="ZlibCompressor" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified compression level.
+ Compressed data will be generated in ZLIB format.
+
+ @param level Compression level (see {@link CompressionLevel})
+ @param strategy Compression strategy (see {@link CompressionStrategy})
+ @param header Compression header (see {@link CompressionHeader})
+ @param directBufferSize Size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default compression level.
+ Compressed data will be generated in ZLIB format.]]>
+ </doc>
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
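+ <!-- Construction sketch (illustrative): the four-argument constructor
+      fixes level, strategy, header and direct-buffer size up front. The
+      enum constant names below follow the Hadoop 0.20 source and are
+      assumptions here, since this record lists only values()/valueOf().
+
+        Compressor gzipStyle = new ZlibCompressor(
+            ZlibCompressor.CompressionLevel.BEST_COMPRESSION,    // assumed constant
+            ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY, // assumed constant
+            ZlibCompressor.CompressionHeader.GZIP_FORMAT,        // assumed constant
+            64 * 1024);                                          // direct buffer size
+ -->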
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <class name="ZlibCompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The type of header for compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <class name="ZlibCompressor.CompressionLevel" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression level for zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <class name="ZlibCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression strategy for zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <class name="ZlibDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="ZlibDecompressor" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new decompressor.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <class name="ZlibDecompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The headers to detect from compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
+ <class name="ZlibFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ZlibFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeZlibLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if native-zlib code is loaded & initialized correctly and
+ can be used for this job.
+
+ @param conf configuration
+ @return <code>true</code> if native-zlib is loaded & initialized
+ and can be used for this job, else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of factories to create the right
+ zlib/gzip compressor/decompressor instances.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
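+ <!-- Usage sketch (illustrative): selecting the zlib implementation
+      through the factory, which is expected to hand back the built-in
+      java.util.zip wrappers above when native-zlib is unavailable.
+
+        Configuration conf = new Configuration();      // org.apache.hadoop.conf.Configuration
+        if (!ZlibFactory.isNativeZlibLoaded(conf)) {
+          System.out.println("falling back to built-in zlib");
+        }
+        Compressor compressor = ZlibFactory.getZlibCompressor(conf);
+        Decompressor decompressor = ZlibFactory.getZlibDecompressor(conf);
+ -->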
+</package>
+<package name="org.apache.hadoop.io.retry">
+ <!-- start class org.apache.hadoop.io.retry.RetryPolicies -->
+ <class name="RetryPolicies" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryPolicies"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="retryUpToMaximumCountWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumTimeWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxTime" type="long"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying for a maximum time, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumCountWithProportionalSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by the number of tries so far.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="exponentialBackoffRetry" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by a random
+ number in the range [0, 2^(number of retries)).
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByRemoteException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ A retry policy for RemoteException.
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <field name="TRY_ONCE_THEN_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail by re-throwing the exception.
+ This corresponds to having no retry mechanism in place.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="TRY_ONCE_DONT_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail silently for <code>void</code> methods, or by
+ re-throwing the exception for non-<code>void</code> methods.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="RETRY_FOREVER" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Keep trying forever.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A collection of useful implementations of {@link RetryPolicy}.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryPolicies -->
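+ <!-- Usage sketch (illustrative): composing the factory methods above into
+      one policy. Retries up to four times, two seconds apart, except that
+      FileNotFoundException fails immediately. Needs
+      java.io.FileNotFoundException, java.util.Map/HashMap and
+      java.util.concurrent.TimeUnit.
+
+        RetryPolicy base =
+            RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 2, TimeUnit.SECONDS);
+        Map<Class<? extends Exception>, RetryPolicy> byException =
+            new HashMap<Class<? extends Exception>, RetryPolicy>();
+        byException.put(FileNotFoundException.class, RetryPolicies.TRY_ONCE_THEN_FAIL);
+        RetryPolicy policy = RetryPolicies.retryByException(base, byException);
+ -->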
+ <!-- start interface org.apache.hadoop.io.retry.RetryPolicy -->
+ <interface name="RetryPolicy" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="shouldRetry" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Exception"/>
+ <param name="retries" type="int"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[<p>
+ Determines whether the framework should retry a
+ method for the given exception, and the number
+ of retries that have been made for that operation
+ so far.
+ </p>
+ @param e The exception that caused the method to fail.
+ @param retries The number of times the method has been retried.
+ @return <code>true</code> if the method should be retried,
+ <code>false</code> if the method should not be retried
+ but shouldn't fail with an exception (only for void methods).
+ @throws Exception The re-thrown exception <code>e</code> indicating
+ that the method failed and should not be retried further.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Specifies a policy for retrying method failures.
+ Implementations of this interface should be immutable.
+ </p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.retry.RetryPolicy -->
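+ <!-- Implementation sketch (illustrative): the shouldRetry() contract in
+      its smallest form. Returning true asks the framework to retry;
+      re-throwing 'e' fails the call for good.
+
+        RetryPolicy atMostThree = new RetryPolicy() {
+          public boolean shouldRetry(Exception e, int retries) throws Exception {
+            if (retries >= 3) {
+              throw e;                                 // give up: re-throw
+            }
+            return true;                               // ask for another attempt
+          }
+        };
+ -->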
+ <!-- start class org.apache.hadoop.io.retry.RetryProxy -->
+ <class name="RetryProxy" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryProxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using the same retry policy for each method in the interface.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param retryPolicy the policy for retrying method call failures
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="methodNameToPolicyMap" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using a set of retry policies specified by method name.
+ If no retry policy is defined for a method then a default of
+ {@link RetryPolicies#TRY_ONCE_THEN_FAIL} is used.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param methodNameToPolicyMap a map of method names to retry policies
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for creating retry proxies.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryProxy -->
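+ <!-- Usage sketch (illustrative): wrapping an implementation behind its
+      interface. 'FileCopier', 'copier' and 'policy' are hypothetical; the
+      cast is needed because create() returns Object.
+
+        FileCopier retrying =
+            (FileCopier) RetryProxy.create(FileCopier.class, copier, policy);
+        retrying.copy(src, dst);                       // each call retried per 'policy'
+ -->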
+</package>
+<package name="org.apache.hadoop.io.serializer">
+ <!-- start interface org.apache.hadoop.io.serializer.Deserializer -->
+ <interface name="Deserializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the deserializer for reading.</p>]]>
+ </doc>
+ </method>
+ <method name="deserialize" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ Deserialize the next object from the underlying input stream.
+ If the object <code>t</code> is non-null then this deserializer
+ <i>may</i> set its internal state to the next object read from the input
+ stream. Otherwise, if the object <code>t</code> is null a new
+ deserialized object will be created.
+ </p>
+ @return the deserialized object]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying input stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for deserializing objects of type <T> from an
+ {@link InputStream}.
+ </p>
+
+ <p>
+ Deserializers are stateful, but must not buffer the input since
+ other consumers may read from the input between calls to
+ {@link #deserialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Deserializer -->
+ <!-- start class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <class name="DeserializerComparator" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator&lt;T&gt;"/>
+ <constructor name="DeserializerComparator" type="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link Deserializer} to deserialize
+ the objects to be compared so that the standard {@link Comparator} can
+ be used to compare them.
+ </p>
+ <p>
+ One may optimize compare-intensive operations by using a custom
+ implementation of {@link RawComparator} that operates directly
+ on byte representations.
+ </p>
+ @param <T>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <class name="JavaSerialization" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;java.io.Serializable&gt;"/>
+ <constructor name="JavaSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ An experimental {@link Serialization} for Java {@link Serializable} classes.
+ </p>
+ @see JavaSerializationComparator]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <class name="JavaSerializationComparator" extends="org.apache.hadoop.io.serializer.DeserializerComparator&lt;T&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JavaSerializationComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o1" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ <param name="o2" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link JavaSerialization}
+ {@link Deserializer} to deserialize objects that are then compared via
+ their {@link Comparable} interfaces.
+ </p>
+ @param <T>
+ @see JavaSerialization]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <!-- start interface org.apache.hadoop.io.serializer.Serialization -->
+ <interface name="Serialization" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Allows clients to test whether this {@link Serialization}
+ supports the given class.]]>
+ </doc>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Serializer} for the given class.]]>
+ </doc>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Deserializer} for the given class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Encapsulates a {@link Serializer}/{@link Deserializer} pair.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serialization -->
+ <!-- start class org.apache.hadoop.io.serializer.SerializationFactory -->
+ <class name="SerializationFactory" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SerializationFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Serializations are found by reading the <code>io.serializations</code>
+ property from <code>conf</code>, which is a comma-delimited list of
+ classnames.
+ </p>]]>
+ </doc>
+ </constructor>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getSerialization" return="org.apache.hadoop.io.serializer.Serialization&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for {@link Serialization}s.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.SerializationFactory -->
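+ <!-- Usage sketch (illustrative): a round trip through the factory. Text
+      is a Writable, so WritableSerialization (below) accepts it when
+      listed in io.serializations. Needs java.io and org.apache.hadoop.io;
+      every call shown may throw IOException.
+
+        SerializationFactory factory = new SerializationFactory(conf);
+        Serializer<Text> serializer = factory.getSerializer(Text.class);
+        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+        serializer.open(bytes);
+        serializer.serialize(new Text("hello"));
+        serializer.close();
+
+        Deserializer<Text> deserializer = factory.getDeserializer(Text.class);
+        deserializer.open(new ByteArrayInputStream(bytes.toByteArray()));
+        Text copy = deserializer.deserialize(null);    // null: allocate a fresh object
+        deserializer.close();
+ -->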
+ <!-- start interface org.apache.hadoop.io.serializer.Serializer -->
+ <interface name="Serializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the serializer for writing.</p>]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Serialize <code>t</code> to the underlying output stream.</p>]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying output stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for serializing objects of type <T> to an
+ {@link OutputStream}.
+ </p>
+
+ <p>
+ Serializers are stateful, but must not buffer the output since
+ other producers may write to the output between calls to
+ {@link #serialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serializer -->
+ <!-- start class org.apache.hadoop.io.serializer.WritableSerialization -->
+ <class name="WritableSerialization" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="WritableSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Serialization} for {@link Writable}s that delegates to
+ {@link Writable#write(java.io.DataOutput)} and
+ {@link Writable#readFields(java.io.DataInput)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.WritableSerialization -->
+</package>
+<package name="org.apache.hadoop.ipc">
+ <!-- start class org.apache.hadoop.ipc.Client -->
+ <class name="Client" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Client" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, org.apache.hadoop.conf.Configuration, javax.net.SocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client whose values are of the given {@link Writable}
+ class.]]>
+ </doc>
+ </constructor>
+ <constructor name="Client" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client with the default SocketFactory
+ @param valueClass
+ @param conf]]>
+ </doc>
+ </constructor>
+ <method name="setPingInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="pingInterval" type="int"/>
+ <doc>
+ <![CDATA[Set the ping interval value in the configuration.
+
+ @param conf Configuration
+ @param pingInterval the ping interval]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all threads related to this client. No further calls may be made
+ using this client.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a call, passing <code>param</code>, to the IPC server running at
+ <code>address</code>, returning the value. Throws exceptions if there are
+ network problems or if the remote code threw an exception.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="params" type="org.apache.hadoop.io.Writable[]"/>
+ <param name="addresses" type="java.net.InetSocketAddress[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Makes a set of calls in parallel. Each parameter is sent to the
+ corresponding address. When all values are available, or have timed out
+ or errored, the collected results are returned in an array. The array
+ contains nulls for calls that timed out or errored.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A client for an IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Server]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Client -->
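+ <!-- Usage sketch (illustrative): one call against an IPC server. The
+      request must be a protocol-specific Writable, so 'request', the host
+      and the port are hypothetical; call() is declared to throw
+      IOException and InterruptedException. Most code goes through RPC
+      (below) rather than using Client directly.
+
+        Client client = new Client(ObjectWritable.class, conf);
+        Writable response =
+            client.call(request, new InetSocketAddress("server-host", 9000));
+        client.stop();                                 // no further calls afterwards
+ -->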
+ <!-- start class org.apache.hadoop.ipc.RemoteException -->
+ <class name="RemoteException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RemoteException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lookupTypes" type="java.lang.Class[]"/>
+ <doc>
+ <![CDATA[If this remote exception wraps one of the lookupTypes,
+ instantiate and return the wrapped exception; otherwise return this.
+ <p>
+ Unwraps any IOException.
+
+ @param lookupTypes the desired exception classes.
+ @return IOException, which is either the unwrapped lookup exception or this.]]>
+ </doc>
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Instantiate and return the exception wrapped up by this remote exception.
+
+ <p> This unwraps any <code>Throwable</code> that has a constructor taking
+ a <code>String</code> as a parameter.
+ Otherwise it returns this.
+
+ @return the unwrapped exception, or this if it cannot be unwrapped.]]>
+ </doc>
+ </method>
+ <method name="writeXml"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the object to XML format]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.ipc.RemoteException"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attrs" type="org.xml.sax.Attributes"/>
+ <doc>
+ <![CDATA[Create RemoteException from attributes]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RemoteException -->
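+ <!-- Usage sketch (illustrative): turning a server-side failure back into
+      a typed client-side exception. 'fs.rename' stands in for any remote
+      call and is hypothetical.
+
+        try {
+          fs.rename(src, dst);
+        } catch (RemoteException re) {
+          // re-throw as FileNotFoundException if that is what the server
+          // threw; otherwise the RemoteException itself comes back
+          throw re.unwrapRemoteException(FileNotFoundException.class);
+        }
+ -->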
+ <!-- start class org.apache.hadoop.ipc.RPC -->
+ <class name="RPC" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="waitForProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object with the default SocketFactory
+
+ @param protocol
+ @param clientVersion
+ @param addr
+ @param conf
+ @return a proxy instance
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopProxy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="proxy" type="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <doc>
+      <![CDATA[Stop this proxy and release its invoker's resources
+ @param proxy the proxy to be stopped]]>
+ </doc>
+ </method>
+ <method name="call" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="method" type="java.lang.reflect.Method"/>
+ <param name="params" type="java.lang.Object[][]"/>
+ <param name="addrs" type="java.net.InetSocketAddress[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Expert: Make multiple, parallel calls to a set of servers.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="numHandlers" type="int"/>
+ <param name="verbose" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple RPC mechanism.
+
+ A <i>protocol</i> is a Java interface. All parameters and return types must
+ be one of:
+
+ <ul> <li>a primitive type, <code>boolean</code>, <code>byte</code>,
+ <code>char</code>, <code>short</code>, <code>int</code>, <code>long</code>,
+ <code>float</code>, <code>double</code>, or <code>void</code>; or</li>
+
+ <li>a {@link String}; or</li>
+
+ <li>a {@link Writable}; or</li>
+
+ <li>an array of the above types</li> </ul>
+
+ All methods in the protocol should throw only IOException. No field data of
+ the protocol instance is transmitted.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC -->
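+  <!-- Illustrative usage sketch (not part of the generated API description):
+       a minimal RPC client built on the class above. MyProtocol, its
+       versionID value, and the server address are assumptions for the example.
+
+       import java.io.IOException;
+       import java.net.InetSocketAddress;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.ipc.RPC;
+       import org.apache.hadoop.ipc.VersionedProtocol;
+
+       // A protocol is a plain Java interface; by convention it carries a
+       // static final long versionID field (see VersionedProtocol below).
+       interface MyProtocol extends VersionedProtocol {
+         long versionID = 1L;                 // hypothetical protocol version
+         String echo(String message) throws IOException;  // String is an allowed type
+       }
+
+       public class RpcClientSketch {
+         public static void main(String[] args) throws Exception {
+           Configuration conf = new Configuration();
+           InetSocketAddress addr = new InetSocketAddress("localhost", 9000); // assumed
+           MyProtocol proxy = (MyProtocol) RPC.getProxy(
+               MyProtocol.class, MyProtocol.versionID, addr, conf);
+           System.out.println(proxy.echo("hello"));
+           RPC.stopProxy(proxy);              // release the invoker's resources
+         }
+       }
+  -->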
+ <!-- start class org.apache.hadoop.ipc.RPC.Server -->
+ <class name="RPC.Server" extends="org.apache.hadoop.ipc.Server"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on]]>
+ </doc>
+ </constructor>
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on
+ @param numHandlers the number of method handler threads to run
+ @param verbose whether each call should be logged]]>
+ </doc>
+ </constructor>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receivedTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An RPC Server.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.Server -->
+ <!-- start class org.apache.hadoop.ipc.RPC.VersionMismatch -->
+ <class name="RPC.VersionMismatch" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.VersionMismatch" type="java.lang.String, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a version mismatch exception
+ @param interfaceName the name of the protocol mismatch
+ @param clientVersion the client's version of the protocol
+ @param serverVersion the server's version of the protocol]]>
+ </doc>
+ </constructor>
+ <method name="getInterfaceName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the interface name
+ @return the java class name
+ (eg. org.apache.hadoop.mapred.InterTrackerProtocol)]]>
+ </doc>
+ </method>
+ <method name="getClientVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the client's preferred version]]>
+ </doc>
+ </method>
+ <method name="getServerVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the version that the server agreed to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A version mismatch for the RPC protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.VersionMismatch -->
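+  <!-- Illustrative sketch, reusing the hypothetical MyProtocol from the RPC
+       sketch above: the proxy-construction handshake is the usual place a
+       VersionMismatch surfaces, and client code can catch it to report both
+       sides of the disagreement.
+
+       try {
+         MyProtocol proxy = (MyProtocol) RPC.getProxy(
+             MyProtocol.class, MyProtocol.versionID, addr, conf);
+       } catch (RPC.VersionMismatch e) {
+         System.err.println("Protocol " + e.getInterfaceName()
+             + ": client speaks " + e.getClientVersion()
+             + ", server speaks " + e.getServerVersion());
+       }
+  -->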
+ <!-- start class org.apache.hadoop.ipc.Server -->
+ <class name="Server" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, int, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, int, org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a server listening on the named port and address. Parameters passed must
+ be of the named class. The <code>handlerCount</code> determines
+ the number of handler threads that will be used to process calls.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.ipc.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the server instance the current call is executing under, or null. May be called under
+ {@link #call(Writable, long)} implementations, and under {@link Writable}
+ methods of parameters and return values. Permits applications to access
+ the server context.]]>
+ </doc>
+ </method>
+ <method name="getRemoteIp" return="java.net.InetAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the remote side IP address when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="getRemoteAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns remote address as a string when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="bind"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.ServerSocket"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <param name="backlog" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A convenience method to bind to a given address and report
+ better exceptions if the address is not a valid host.
+ @param socket the socket to bind
+ @param address the address to bind to
+ @param backlog the number of connections allowed in the queue
+ @throws BindException if the address can't be bound
+ @throws UnknownHostException if the address isn't a valid host name
+ @throws IOException other random errors from bind]]>
+ </doc>
+ </method>
+ <method name="setSocketSendBufSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Sets the socket buffer size used for responding to RPCs]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts the service. Must be called before any calls will be handled.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops the service. No new calls will be handled after this is called.]]>
+ </doc>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Wait for the server to be stopped.
+ Does not wait for all subthreads to finish.
+ See {@link #stop()}.]]>
+ </doc>
+ </method>
+ <method name="getListenerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return the socket (ip+port) on which the RPC server is listening.
+ @return the socket (ip+port) on which the RPC server is listening.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receiveTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called for each call.]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The number of open RPC connections
+ @return the number of open RPC connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <field name="HEADER" type="java.nio.ByteBuffer"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The first four bytes of Hadoop RPC connections]]>
+ </doc>
+ </field>
+ <field name="CURRENT_VERSION" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rpcMetrics" type="org.apache.hadoop.ipc.metrics.RpcMetrics"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Client]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Server -->
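+  <!-- Illustrative sketch: the abstract Server is used by subclassing it and
+       implementing call(Writable, long). The echo behaviour, the choice of
+       Text as the parameter class, and the bind address/port are invented
+       for the example.
+
+       import java.io.IOException;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.io.Text;
+       import org.apache.hadoop.io.Writable;
+       import org.apache.hadoop.ipc.Server;
+
+       public class EchoServer extends Server {
+         public EchoServer(Configuration conf) throws IOException {
+           // bind address, port, parameter class, handler count, configuration
+           super("0.0.0.0", 9000, Text.class, 2, conf);
+         }
+         public Writable call(Writable param, long receiveTime) throws IOException {
+           return param;  // echo the single Writable parameter back as the value
+         }
+       }
+
+       // EchoServer server = new EchoServer(new Configuration());
+       // server.start();  // must be called before any calls are handled
+       // server.join();   // wait until stop() is invoked
+  -->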
+ <!-- start interface org.apache.hadoop.ipc.VersionedProtocol -->
+ <interface name="VersionedProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Return the protocol version corresponding to the protocol interface.
+ @param protocol The classname of the protocol interface
+ @param clientVersion The version of the protocol that the client speaks
+ @return the version that the server will speak]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Superclass of all protocols that use Hadoop RPC.
+ Subclasses of this interface are also supposed to have
+ a static final long versionID field.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.VersionedProtocol -->
+</package>
+<package name="org.apache.hadoop.ipc.metrics">
+ <!-- start class org.apache.hadoop.ipc.metrics.RpcMetrics -->
+ <class name="RpcMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="RpcMetrics" type="java.lang.String, java.lang.String, org.apache.hadoop.ipc.Server"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+      <![CDATA[Push the metrics to the monitoring subsystem on each doUpdates() call.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="rpcQueueTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The metrics variables are public:
+ - they can be set directly by calling their set/inc methods
+ - they can also be read directly, e.g. JMX does this.]]>
+ </doc>
+ </field>
+ <field name="rpcProcessingTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="metricsList" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.metrics.util.MetricsTimeVaryingRate&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various RPC statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #rpcQueueTime}.inc(time)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.metrics.RpcMetrics -->
+ <!-- start interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+ <interface name="RpcMgtMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRpcOpsNumber" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of RPC Operations in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Average time for RPC Operations in the last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Average RPC Operation Queued Time in the last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all min max times]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The number of open RPC connections
+ @return the number of open RPC connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for the RPC layer.
+ Many of the statistics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the statistics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most do.
+ The default Null metrics context however does NOT. So if you aren't
+ using any other metrics context then you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ rpc.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+</package>
+<package name="org.apache.hadoop.log">
+ <!-- start class org.apache.hadoop.log.LogLevel -->
+ <class name="LogLevel" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[A command line implementation]]>
+ </doc>
+ </method>
+ <field name="USAGES" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Change log level in runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel -->
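+  <!-- Illustrative sketch: the host:port and logger name are assumptions, and
+       the -getlevel/-setlevel argument form follows this class's USAGES
+       string as commonly documented, so it should be verified against it.
+
+       // query the current level of a logger on a running daemon
+       LogLevel.main(new String[] {"-getlevel", "jobtracker-host:50030",
+                                   "org.apache.hadoop.mapred.JobTracker"});
+       // raise it to DEBUG at runtime
+       LogLevel.main(new String[] {"-setlevel", "jobtracker-host:50030",
+                                   "org.apache.hadoop.mapred.JobTracker", "DEBUG"});
+  -->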
+ <!-- start class org.apache.hadoop.log.LogLevel.Servlet -->
+ <class name="LogLevel.Servlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel.Servlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A servlet implementation]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel.Servlet -->
+</package>
+<package name="org.apache.hadoop.mapred">
+ <!-- start class org.apache.hadoop.mapred.ClusterStatus -->
+ <class name="ClusterStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of task trackers in the cluster.
+
+ @return the number of task trackers in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running map tasks in the cluster.
+
+ @return the number of currently running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running reduce tasks in the cluster.
+
+ @return the number of currently running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running map tasks in the cluster.
+
+ @return the maximum capacity for running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running reduce tasks in the cluster.
+
+ @return the maximum capacity for running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getJobTrackerState" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current state of the <code>JobTracker</code>,
+ as {@link JobTracker.State}
+
+ @return the current state of the <code>JobTracker</code>.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Status information on the current state of the Map-Reduce cluster.
+
+ <p><code>ClusterStatus</code> provides clients with information such as:
+ <ol>
+ <li>
+ Size of the cluster.
+ </li>
+ <li>
+ Task capacity of the cluster.
+ </li>
+ <li>
+ The number of currently running map & reduce tasks.
+ </li>
+ <li>
+ State of the <code>JobTracker</code>.
+ </li>
+ </ol></p>
+
+ <p>Clients can query for the latest <code>ClusterStatus</code>, via
+ {@link JobClient#getClusterStatus()}.</p>
+
+ @see JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ClusterStatus -->
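+  <!-- Illustrative sketch: a minimal status query via
+       JobClient.getClusterStatus(); the default JobConf is assumed to point
+       at a running cluster.
+
+       import org.apache.hadoop.mapred.ClusterStatus;
+       import org.apache.hadoop.mapred.JobClient;
+       import org.apache.hadoop.mapred.JobConf;
+
+       JobConf conf = new JobConf();
+       JobClient client = new JobClient(conf);
+       ClusterStatus status = client.getClusterStatus();
+       System.out.println(status.getTaskTrackers() + " trackers, "
+           + status.getMapTasks() + "/" + status.getMaxMapTasks() + " map slots busy, "
+           + status.getReduceTasks() + "/" + status.getMaxReduceTasks() + " reduce slots busy");
+  -->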
+ <!-- start class org.apache.hadoop.mapred.Counters -->
+ <class name="Counters" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Group&gt;"/>
+ <constructor name="Counters"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getGroupNames" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the names of all counter groups (one per counter <code>Enum</code> class).
+ @return Set of group names.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Group&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getGroup" return="org.apache.hadoop.mapred.Counters.Group"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="groupName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the named counter group, or an empty group if there is none
+ with the specified name.]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Find the counter for the given enum. The same enum will always return the
+ same counter.
+ @param key the counter key
+ @return the matching counter object]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Find a counter given the group and the name.
+ @param group the name of the group
+ @param name the internal name of the counter
+ @return the counter for that name]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="group" type="java.lang.String"/>
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Find a counter by using strings
+ @param group the name of the group
+ @param id the id of the counter within the group (0 to N-1)
+ @param name the internal name of the counter
+ @return the counter for that name
+ @deprecated]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param key identifies a counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param group the name of the group
+ @param counter the internal name of the counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Returns current value of the specified counter, or 0 if the counter
+ does not exist.]]>
+ </doc>
+ </method>
+ <method name="incrAllCounters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+ </doc>
+ </method>
+ <method name="sum" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.mapred.Counters"/>
+ <param name="b" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Convenience method for computing the sum of two sets of counters.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of counters, by summing the number of counters
+ in each group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the set of groups.
+ The external format is:
+ #groups (groupName group)*
+
+ i.e. the number of groups followed by 0 or more groups, where each
+ group is of the form:
+
+ groupDisplayName #counters (false | true counter)*
+
+ where each counter is of the form:
+
+ name (false | true displayName) value]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a set of groups.]]>
+ </doc>
+ </method>
+ <method name="log"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Logs the current counter values.
+ @param log The log to use.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return textual representation of the counter values.]]>
+ </doc>
+ </method>
+ <method name="makeCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert a counters object into a single line that is easy to parse.
+ @return the string with "name=value" for each counter and separated by ","]]>
+ </doc>
+ </method>
+ <method name="makeEscapedCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Represent the counter in a textual format that can be converted back to
+ its object form
+ @return the string in the following format
+ {(groupname)(group-displayname)[(countername)(displayname)(value)][][]}{}{}]]>
+ </doc>
+ </method>
+ <method name="fromEscapedCompactString" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compactString" type="java.lang.String"/>
+ <exception name="ParseException" type="java.text.ParseException"/>
+ <doc>
+      <![CDATA[Convert a stringified counter representation into a counter object. Note
+ that the counter can be recovered only if it was stringified using
+ {@link #makeEscapedCompactString()}.
+ @return a Counter]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A set of named counters.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> can be of
+ any {@link Enum} type.</p>
+
+ <p><code>Counters</code> are bunched into {@link Group}s, each comprising
+ counters from a particular <code>Enum</code> class.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters -->
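+  <!-- Illustrative sketch: MyCounters and the skip condition are invented;
+       Reporter.incrCounter inside a task and RunningJob.getCounters after
+       completion are the usual access points in this API generation.
+
+       enum MyCounters { RECORDS_SKIPPED }   // any Enum can back a counter
+
+       // inside a Mapper, counting via the Reporter:
+       //   reporter.incrCounter(MyCounters.RECORDS_SKIPPED, 1);
+
+       // after the job completes, reading the aggregated value:
+       RunningJob job = JobClient.runJob(conf);
+       Counters counters = job.getCounters();
+       long skipped = counters.getCounter(MyCounters.RECORDS_SKIPPED);
+  -->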
+ <!-- start class org.apache.hadoop.mapred.Counters.Counter -->
+ <class name="Counters.Counter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the internal name of the counter.
+ @return the internal name of the counter]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the counter.
+ @return the user facing name of the counter]]>
+ </doc>
+ </method>
+ <method name="setDisplayName"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="displayName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the display name of the counter.]]>
+ </doc>
+ </method>
+ <method name="makeEscapedCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compact stringified version of the counter in the format
+ [(actual-name)(display-name)(value)]]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[What is the current value of this counter?
+ @return the current value]]>
+ </doc>
+ </method>
+ <method name="increment"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Increment this counter by the given value
+ @param incr the value to increase this counter by]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A counter record, comprising its name and value.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Counter -->
+ <!-- start class org.apache.hadoop.mapred.Counters.Group -->
+ <class name="Counters.Group" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"/>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns raw name of the group. This is the name of the enum class
+ for this group of counters.]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns localized name of the group. This is the same as getName() by
+ default, but different if an appropriate ResourceBundle is found.]]>
+ </doc>
+ </method>
+ <method name="setDisplayName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="displayName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the display name]]>
+ </doc>
+ </method>
+ <method name="makeEscapedCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compact stringified version of the group in the format
+ {(actual-name)(display-name)(value)[][][]} where [] are compact strings for the
+ counters within.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="counterName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the specified counter, or 0 if the counter does
+ not exist.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getCounter(String)} instead">
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given id and create it if it doesn't exist.
+ @param id the numeric id of the counter within the group
+ @param name the internal counter name
+ @return the counter
+ @deprecated use {@link #getCounter(String)} instead]]>
+ </doc>
+ </method>
+ <method name="getCounterForName" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given name and create it if it doesn't exist.
+ @param name the internal counter name
+ @return the counter]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of counters in this group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+      <![CDATA[<code>Group</code> of counters, comprising counters from a particular
+ counter {@link Enum} class.
+
+ <p><code>Group</code> handles localization of the class name and the
+ counter names.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Group -->
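+  <!-- Illustrative sketch: since Counters is Iterable over its Groups and
+       each Group is Iterable over its Counters (per the interfaces above), a
+       full dump is two nested loops. The counters variable is assumed to
+       come from RunningJob.getCounters().
+
+       for (Counters.Group group : counters) {
+         for (Counters.Counter counter : group) {
+           System.out.println(group.getDisplayName() + "."
+               + counter.getDisplayName() + " = " + counter.getCounter());
+         }
+       }
+  -->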
+ <!-- start class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <class name="DefaultJobHistoryParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DefaultJobHistoryParser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseJobTasks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobHistoryFile" type="java.lang.String"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobHistory.JobInfo"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Populates a JobInfo object from the job's history log file.
+ @param jobHistoryFile history file for this job.
+ @param job a precreated JobInfo object, should be non-null.
+ @param fs FileSystem where historyFile is present.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Default parser for job history files. It creates object model from
+ job history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <!-- start class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <class name="FileAlreadyExistsException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileAlreadyExistsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileAlreadyExistsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+      <![CDATA[Thrown when the target file of an operation already exists and
+ is not configured to be overwritten.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <!-- start class org.apache.hadoop.mapred.FileInputFormat -->
+ <class name="FileInputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <constructor name="FileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setMinSplitSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="minSplitSize" type="long"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="filename" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+      <![CDATA[Is the given filename splitable? Usually true, but if the file is
+ stream compressed, it will not be.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split-up
+ so that {@link Mapper}s process entire files.
+
+ @param fs the file system that the file is on
+ @param filename the file name to check
+ @return is this file splitable?]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setInputPathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="filter" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.PathFilter&gt;"/>
+ <doc>
+ <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+
+ @param filter the PathFilter class use for filtering the input paths.]]>
+ </doc>
+ </method>
+ <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get a PathFilter instance of the filter set for the input paths.
+
+ @return the PathFilter instance set for the job, or null if none has been set.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression.
+
+ @param job the job to list input paths for
+ @return array of FileStatus objects
+ @throws IOException if zero items are found.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Splits files returned by {@link #listStatus(JobConf)} when
+ they're too big.]]>
+ </doc>
+ </method>
+ <method name="computeSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="goalSize" type="long"/>
+ <param name="minSize" type="long"/>
+ <param name="blockSize" type="long"/>
+ </method>
+ <method name="getBlockIndex" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+ <param name="offset" type="long"/>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the given comma separated paths as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be set as
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add the given comma separated paths to the list of inputs for
+ the map-reduce job.
+
+ @param conf The configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be added to
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job.
+ @param inputPaths the {@link Path}s of the input directories/files
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @param conf The configuration of the job
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class for file-based {@link InputFormat}.
+
+ <p><code>FileInputFormat</code> is the base class for all file-based
+ <code>InputFormat</code>s. This provides a generic implementation of
+ {@link #getSplits(JobConf, int)}.
+ Subclasses of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(FileSystem, Path)} method to ensure input-files are
+ not split-up and are processed as a whole by {@link Mapper}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileInputFormat -->
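+  <!-- Illustrative sketch: configuring input paths through the static
+       helpers above. The paths are assumptions for the example.
+
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.mapred.FileInputFormat;
+       import org.apache.hadoop.mapred.JobConf;
+
+       JobConf conf = new JobConf();
+       FileInputFormat.setInputPaths(conf, new Path("/data/in1"), new Path("/data/in2"));
+       FileInputFormat.addInputPath(conf, new Path("/data/in3"));  // appends a third input
+       // equivalently, comma-separated:
+       //   FileInputFormat.setInputPaths(conf, "/data/in1,/data/in2");
+  -->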
+ <!-- start class org.apache.hadoop.mapred.FileOutputCommitter -->
+ <class name="FileOutputCommitter" extends="org.apache.hadoop.mapred.OutputCommitter"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileOutputCommitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setupJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cleanupJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setupTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="commitTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="abortTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ </method>
+ <method name="needsTaskCommit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TEMP_DIR_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Temporary directory name]]>
+ </doc>
+ </field>
+ <doc>
+      <![CDATA[An {@link OutputCommitter} that commits files specified
+ in the job output directory, i.e. ${mapred.output.dir}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileOutputCommitter -->
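+  <!-- Illustrative sketch: pairing the committer with an output directory.
+       Selecting it explicitly via JobConf.setOutputCommitter is an assumption
+       of this example; it is the default committer for file-based output.
+
+       conf.setOutputCommitter(FileOutputCommitter.class);
+       FileOutputFormat.setOutputPath(conf, new Path("/data/out"));  // path assumed
+       // task side-files are staged under the TEMP_DIR_NAME subdirectory of
+       // ${mapred.output.dir} and promoted on commitTask/cleanupJob
+  -->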
+ <!-- start class org.apache.hadoop.mapred.FileOutputFormat -->
+ <class name="FileOutputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="FileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
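+
+ <p>For example (a sketch; <code>GzipCodec</code> is from
+ <code>org.apache.hadoop.io.compress</code>):</p>
+
+ <p><blockquote><pre>
+ FileOutputFormat.setCompressOutput(conf, true);
+ FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);
+ </pre></blockquote></p>
+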
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param conf The configuration of the job.
+ @param outputDir the {@link Path} of the output directory for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(JobConf)]]>
+ </doc>
+ </method>
+ <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the task's temporary output directory
+ for the map-reduce job.
+
+ <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
+
+ <p><i>Note:</i> The following is valid only if the {@link OutputCommitter}
+ is {@link FileOutputCommitter}. If <code>OutputCommitter</code> is not
+ a <code>FileOutputCommitter</code>, the task's temporary output
+ directory is the same as {@link #getOutputPath(JobConf)}, i.e.
+ <tt>${mapred.output.dir}</tt>.</p>
+
+ <p>Some applications need to create/write-to side-files, which differ from
+ the actual job-outputs.
+
+ <p>In such cases there could be issues with two instances of the same TIP
+ (running simultaneously, e.g. speculative tasks) trying to open or write to
+ the same file (path) on HDFS. Hence the application-writer will have to pick
+ unique names per task-attempt (e.g. using the attempt id, say
+ <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
+
+ <p>To get around this the Map-Reduce framework helps the application-writer
+ out by maintaining a special
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>
+ sub-directory for each task-attempt on HDFS where the output of the
+ task-attempt goes. On successful completion of the task-attempt the files
+ in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only)
+ are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the
+ framework discards the sub-directory of unsuccessful task-attempts. This
+ is completely transparent to the application.</p>
+
+ <p>The application-writer can take advantage of this by creating any
+ side-files required in <tt>${mapred.work.output.dir}</tt> during execution
+ of a task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the
+ framework will move them out similarly - thus the application-writer doesn't
+ have to pick unique paths per task-attempt.</p>
+
+ <p><i>Note</i>: the value of <tt>${mapred.work.output.dir}</tt> during
+ execution of a particular task-attempt is actually
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>, and this value is
+ set by the map-reduce framework. So, just create any side-files in the
+ path returned by {@link #getWorkOutputPath(JobConf)} from map/reduce
+ task to take advantage of this feature.</p>
+
+ <p>The entire discussion holds true for maps of jobs with
+ reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
+ goes directly to HDFS.</p>
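+
+ <p>A hedged sketch of writing one side-file from within a task, assuming the
+ usual <code>org.apache.hadoop.fs</code> imports:</p>
+
+ <p><blockquote><pre>
+ Path workDir = FileOutputFormat.getWorkOutputPath(conf);
+ Path sideFile = new Path(workDir, FileOutputFormat.getUniqueName(conf, "side"));
+ FSDataOutputStream out = sideFile.getFileSystem(conf).create(sideFile);
+ out.writeBytes("extra output\n");
+ out.close();
+ </pre></blockquote></p>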
+
+ @return the {@link Path} to the task's temporary output directory
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to create the task's temporary output directory and
+ return the path to the task's output file.
+
+ @param conf job-configuration
+ @param name temporary task-output filename
+ @return path to the task's temporary output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUniqueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Helper function to generate a name that is unique for the task.
+
+ <p>The generated name can be used to create custom files from within the
+ different tasks for the job; the names for different tasks will not collide
+ with each other.</p>
+
+ <p>The given name is postfixed with the task type, 'm' for maps, 'r' for
+ reduces, and the task partition number. For example, given the name 'test'
+ running on the first map of the job, the generated name will be
+ 'test-m-00000'.</p>
+
+ @param conf the configuration for the job.
+ @param name the name to make unique.
+ @return a unique name across all tasks of the job.]]>
+ </doc>
+ </method>
+ <method name="getPathForCustomFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Helper function to generate a {@link Path} for a file that is unique for
+ the task within the job output directory.
+
+ <p>The path can be used to create custom files from within the map and
+ reduce tasks. The path name will be unique for each task. The path parent
+ will be the job output directory.</p>
+
+ <p>This method uses the {@link #getUniqueName} method to make the file name
+ unique for the task.</p>
+
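+ <p>For example (a sketch; the resolved path shown is only illustrative):</p>
+
+ <p><blockquote><pre>
+ Path custom = FileOutputFormat.getPathForCustomFile(conf, "stats");
+ // e.g. roughly resolves to .../stats-m-00000 for the first map of the job
+ </pre></blockquote></p>
+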
+ @param conf the configuration for the job.
+ @param name the name for the file.
+ @return a unique path across all tasks of the job.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base class for {@link OutputFormat}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.FileSplit -->
+ <class name="FileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[Constructs a split.
+ @deprecated
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process]]>
+ </doc>
+ </constructor>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a split with host information.
+
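+ <p>A hedged sketch of constructing a split covering the first megabyte of a
+ file (the path and host name are illustrative):</p>
+
+ <p><blockquote><pre>
+ String[] hosts = { "datanode1.example.com" };
+ FileSplit split =
+   new FileSplit(new Path("in/part-00000"), 0L, 1024L * 1024L, hosts);
+ </pre></blockquote></p>
+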
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+ </doc>
+ </constructor>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file containing this split's data.]]>
+ </doc>
+ </method>
+ <method name="getStart" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The position of the first byte in the file to process.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the file to process.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A section of an input file. Returned by {@link
+ InputFormat#getSplits(JobConf, int)} and passed to
+ {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileSplit -->
+ <!-- start class org.apache.hadoop.mapred.ID -->
+ <class name="ID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.mapred.ID&gt;"/>
+ <constructor name="ID" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an ID object from the given int.]]>
+ </doc>
+ </constructor>
+ <constructor name="ID"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the int which represents the identifier.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare IDs by their associated numbers.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.ID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.ID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct an ID object from the given string.
+
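+ <p>For example (a sketch; the id string is illustrative of the
+ <code>TaskID</code> format):</p>
+
+ <p><blockquote><pre>
+ ID id = ID.forName("task_200707121733_0003_m_000005");
+ </pre></blockquote></p>
+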
+ @return the constructed ID object, or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A general identifier, which internally stores the id
+ as an integer. This is the super class of {@link JobID},
+ {@link TaskID} and {@link TaskAttemptID}.
+
+ @see JobID
+ @see TaskID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ID -->
+ <!-- start interface org.apache.hadoop.mapred.InputFormat -->
+ <interface name="InputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically split the set of input files for the job.
+
+ <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
+ for processing.</p>
+
+ <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
+ input files are not physically split into chunks. For example, a split could
+ be an <i>&lt;input-file-path, start, offset&gt;</i> tuple.</p>
+
+ @param job job configuration.
+ @param numSplits the desired number of splits, a hint.
+ @return an array of {@link InputSplit}s for the job.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordReader} for the given {@link InputSplit}.
+
+ <p>It is the responsibility of the <code>RecordReader</code> to respect
+ record boundaries while processing the logical split to present a
+ record-oriented view to the individual task.</p>
+
+ @param split the {@link InputSplit}
+ @param job the job that this split belongs to
+ @return a {@link RecordReader}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputFormat</code> describes the input-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the input-specification of the job.
+ </li>
+ <li>
+ Split up the input file(s) into logical {@link InputSplit}s, each of
+ which is then assigned to an individual {@link Mapper}.
+ </li>
+ <li>
+ Provide the {@link RecordReader} implementation to be used to glean
+ input records from the logical <code>InputSplit</code> for processing by
+ the {@link Mapper}.
+ </li>
+ </ol>
+
+ <p>The default behavior of file-based {@link InputFormat}s, typically
+ sub-classes of {@link FileInputFormat}, is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of the input files. However, the {@link FileSystem} blocksize of
+ the input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Clearly, logical splits based on input size are insufficient for many
+ applications since record boundaries must be respected. In such cases, the
+ application also has to implement a {@link RecordReader}, which has the
+ responsibility to respect record-boundaries and present a record-oriented
+ view of the logical <code>InputSplit</code> to the individual task.</p>
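+
+ <p>A hedged sketch of how an <code>InputFormat</code> is consumed, assuming
+ input paths have already been set on the {@link JobConf}
+ (<code>TextInputFormat</code> and <code>ReflectionUtils</code> are used only
+ as examples):</p>
+
+ <p><blockquote><pre>
+ InputFormat fmt = ReflectionUtils.newInstance(TextInputFormat.class, conf);
+ InputSplit[] splits = fmt.getSplits(conf, 4);   // 4 is only a hint
+ RecordReader reader = fmt.getRecordReader(splits[0], conf, Reporter.NULL);
+ </pre></blockquote></p>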
+
+ @see InputSplit
+ @see RecordReader
+ @see JobClient
+ @see FileInputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.InputSplit -->
+ <interface name="InputSplit" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the total number of bytes in the data of the <code>InputSplit</code>.
+
+ @return the number of bytes in the input split.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hostnames where the input split is located.
+
+ @return list of hostnames where data of the <code>InputSplit</code> is
+ located as an array of <code>String</code>s.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputSplit</code> represents the data to be processed by an
+ individual {@link Mapper}.
+
+ <p>Typically, it presents a byte-oriented view on the input, and it is the
+ responsibility of the {@link RecordReader} of the job to process this and
+ present a record-oriented view.</p>
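+
+ <p>For example, a hedged sketch of inspecting a split:</p>
+
+ <p><blockquote><pre>
+ long bytes = split.getLength();
+ for (String host : split.getLocations()) {
+   System.out.println(host + " holds " + bytes + " bytes");
+ }
+ </pre></blockquote></p>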
+
+ @see InputFormat
+ @see RecordReader]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputSplit -->
+ <!-- start class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <class name="InvalidFileTypeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidFileTypeException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidFileTypeException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Used when the file type differs from the desired file type,
+ e.g. getting a file when a directory is expected.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidInputException -->
+ <class name="InvalidInputException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidInputException" type="java.util.List&lt;java.io.IOException&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create the exception with the given list.
+ @param probs the list of problems to report; this list is not copied.]]>
+ </doc>
+ </constructor>
+ <method name="getProblems" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete list of the problems reported.
+ @return the list of problems, which must not be modified]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get a summary message of the problems found.
+ @return the concatenated messages from all of the problems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class wraps a list of problems with the input, so that the user
+ can get a list of problems together instead of finding and fixing them one
+ by one.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidInputException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <class name="InvalidJobConfException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidJobConfException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidJobConfException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when the jobconf is missing some mandatory
+ attributes, or the value of some attributes is invalid.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <!-- start class org.apache.hadoop.mapred.IsolationRunner -->
+ <class name="IsolationRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IsolationRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Run a single task
+ @param args the first argument is the task directory]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.IsolationRunner -->
+ <!-- start class org.apache.hadoop.mapred.JobClient -->
+ <class name="JobClient" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobClient"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job client.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client with the given {@link JobConf}, and connect to the
+ default {@link JobTracker}.
+
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client, connect to the indicated job tracker.
+
+ @param jobTrackAddr the job tracker to connect to.
+ @param conf configuration.]]>
+ </doc>
+ </constructor>
+ <method name="getCommandLineConfig" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the command-line configuration.]]>
+ </doc>
+ </method>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Connect to the default {@link JobTracker}.
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the <code>JobClient</code>.]]>
+ </doc>
+ </method>
+ <method name="getFs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a filesystem handle. We need this to prepare jobs
+ for submission to the MapReduce system.
+
+ @return the filesystem handle.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobFile" type="java.lang.String"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param jobFile the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param job the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isJobDirValid" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobDirPath" type="org.apache.hadoop.fs.Path"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Checks if the job directory is clean and has all the required
+ components for (re)starting the job.]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a {@link RunningJob} object to track an ongoing job. Returns
+ null if the id does not correspond to any known job.
+
+ @param jobid the jobid of the job.
+ @return the {@link RunningJob} handle to track the job, null if the
+ <code>jobid</code> doesn't correspond to any known job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getJob(JobID)}.">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getJob(JobID)}.]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the map tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the map tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getMapTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getMapTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the reduce tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the reduce tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCleanupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the cleanup tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the cleanup tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getSetupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the setup tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the setup tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getReduceTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getReduceTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the Map-Reduce cluster.
+
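+ <p>For example (a sketch; <code>client</code> is assumed to be a connected
+ <code>JobClient</code>):</p>
+
+ <p><blockquote><pre>
+ ClusterStatus status = client.getClusterStatus();
+ int maxMaps = status.getMaxMapTasks();
+ int maxReduces = status.getMaxReduceTasks();
+ </pre></blockquote></p>
+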
+ @return the status information about the Map-Reduce cluster as an object
+ of {@link ClusterStatus}.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are not completed and not failed.
+
+ @return array of {@link JobStatus} for the running/to-be-run jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are submitted.
+
+ @return array of {@link JobStatus} for the submitted jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Utility that submits a job, then polls for progress until the job is
+ complete.
+
+ @param job the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Sets the output filter for tasks. Only those tasks whose
+ output matches the filter are printed.
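+
+ <p>For example (a sketch; <code>SUCCEEDED</code> is one of the
+ {@link JobClient.TaskStatusFilter} values):</p>
+
+ <p><blockquote><pre>
+ client.setTaskOutputFilter(JobClient.TaskStatusFilter.SUCCEEDED);
+ </pre></blockquote></p>
+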
+ @param newValue task filter.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the task output filter out of the JobConf.
+
+ @param job the JobConf to examine.
+ @return the filter level.]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Modify the JobConf to set the task output filter.
+
+ @param job the JobConf to modify.
+ @param newValue the value to set.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns task output filter.
+ @return task filter.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getDefaultMaps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Maps in the cluster.
+
+ @return the max available Maps in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDefaultReduces" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Reduces in the cluster.
+
+ @return the max available Reduces in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Grab the jobtracker system directory path where job-specific files are to be placed.
+
+ @return the system directory where job-specific files are to be placed.]]>
+ </doc>
+ </method>
+ <method name="getQueues" return="org.apache.hadoop.mapred.JobQueueInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array of queue information objects about all the Job Queues
+ configured.
+
+ @return Array of JobQueueInfo objects
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJobsFromQueue" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets all the jobs which were added to a particular Job Queue.
+
+ @param queueName name of the Job Queue
+ @return Array of jobs present in the job queue
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getQueueInfo" return="org.apache.hadoop.mapred.JobQueueInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the queue information associated with a particular Job Queue.
+
+ @param queueName name of the job queue.
+ @return Queue information associated with the particular queue.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[<code>JobClient</code> is the primary interface for the user-job to interact
+ with the {@link JobTracker}.
+
+ <code>JobClient</code> provides facilities to submit jobs, track their
+ progress, access component-tasks' reports/logs, get the Map-Reduce cluster
+ status information etc.
+
+ <p>The job submission process involves:
+ <ol>
+ <li>
+ Checking the input and output specifications of the job.
+ </li>
+ <li>
+ Computing the {@link InputSplit}s for the job.
+ </li>
+ <li>
+ Setup the requisite accounting information for the {@link DistributedCache}
+ of the job, if necessary.
+ </li>
+ <li>
+ Copying the job's jar and configuration to the map-reduce system directory
+ on the distributed file-system.
+ </li>
+ <li>
+ Submitting the job to the <code>JobTracker</code> and optionally monitoring
+ its status.
+ </li>
+ </ol></p>
+
+ Normally the user creates the application, describes various facets of the
+ job via {@link JobConf} and then uses the <code>JobClient</code> to submit
+ the job and monitor its progress.
+
+ <p>Here is an example on how to use <code>JobClient</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ FileInputFormat.setInputPaths(job, new Path("in"));
+ FileOutputFormat.setOutputPath(job, new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ </pre></blockquote></p>
+
+ <h4 id="JobControl">Job Control</h4>
+
+ <p>At times clients would chain map-reduce jobs to accomplish complex tasks
+ which cannot be done via a single map-reduce job. This is fairly easy since
+ the output of the job typically goes to the distributed file-system, and that
+ can be used as the input for the next job.</p>
+
+ <p>However, this also means that the onus of ensuring jobs are complete
+ (success/failure) lies squarely on the clients. In such situations the
+ various job-control options are:
+ <ol>
+ <li>
+ {@link #runJob(JobConf)} : submits the job and returns only after
+ the job has completed.
+ </li>
+ <li>
+ {@link #submitJob(JobConf)} : only submits the job; the client then polls
+ the returned {@link RunningJob} handle to query status and make
+ scheduling decisions (see the sketch below).
+ </li>
+ <li>
+ {@link JobConf#setJobEndNotificationURI(String)} : sets up a notification
+ upon job-completion, thus avoiding polling.
+ </li>
+ </ol></p>
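+
+ <p>A hedged sketch of the second option (error handling elided;
+ <code>Thread.sleep</code> may throw <code>InterruptedException</code>):</p>
+
+ <p><blockquote><pre>
+ JobClient client = new JobClient(job);
+ RunningJob running = client.submitJob(job);
+ while (!running.isComplete()) {
+   Thread.sleep(5000);                // poll every five seconds
+ }
+ if (!running.isSuccessful()) {
+   System.err.println("Job " + running.getID() + " failed");
+ }
+ </pre></blockquote></p>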
+
+ @see JobConf
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient -->
+ <!-- start class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <class name="JobClient.TaskStatusFilter" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobClient.TaskStatusFilter&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <!-- start class org.apache.hadoop.mapred.JobConf -->
+ <class name="JobConf" extends="org.apache.hadoop.conf.Configuration"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new map/reduce configuration where the behavior of reading from the
+ default resources can be turned off.
+ <p/>
+ If the parameter {@code loadDefaults} is false, the new instance
+ will not load resources from the default files.
+
+ @param loadDefaults specifies whether to load from the default files]]>
+ </doc>
+ </constructor>
+ <method name="getJar" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user jar for the map-reduce job.
+
+ @return the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJar"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jar" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user jar for the map-reduce job.
+
+ @param jar the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJarByClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the job's jar file by finding an example class location.
+
+ @param cls the example class.]]>
+ </doc>
+ </method>
+ <method name="getLocalDirs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="subdir" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a local file name. Files are distributed among configured
+ local directories.]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reported username for this job.
+
+ @return the username]]>
+ </doc>
+ </method>
+ <method name="setUser"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the reported username for this job.
+
+ @param user the username for this job.]]>
+ </doc>
+ </method>
+ <method name="setKeepFailedTaskFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the framework should keep the intermediate files for
+ failed tasks.
+
+ @param keep <code>true</code> if framework should keep the intermediate files
+ for failed tasks, <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="getKeepFailedTaskFiles" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should the temporary files for failed tasks be kept?
+
+ @return should the files be kept?]]>
+ </doc>
+ </method>
+ <method name="setKeepTaskFilesPattern"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pattern" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set a regular expression for task names that should be kept.
+ The regular expression ".*_m_000123_0" would keep the files
+ for the first instance of map 123 that ran.
+
+ @param pattern the java.util.regex.Pattern to match against the
+ task names.]]>
+ </doc>
+ </method>
+ <method name="getKeepTaskFilesPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the regular expression that is matched against the task names
+ to see if we need to keep the files.
+
+ @return the pattern as a string, if it was set, otherwise null.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the default file system.
+
+ @param dir the new current working directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the default file system.
+
+ @return the directory name.]]>
+ </doc>
+ </method>
+ <method name="setNumTasksToExecutePerJvm"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numTasks" type="int"/>
+ <doc>
+ <![CDATA[Sets the number of tasks that a spawned task JVM should run
+ before it exits.
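+
+ <p>For example (a sketch):</p>
+
+ <p><blockquote><pre>
+ conf.setNumTasksToExecutePerJvm(-1);   // reuse each task JVM without limit
+ </pre></blockquote></p>
+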
+ @param numTasks the number of tasks to execute; defaults to 1;
+ -1 signifies no limit]]>
+ </doc>
+ </method>
+ <method name="getNumTasksToExecutePerJvm" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of tasks that a spawned JVM should execute]]>
+ </doc>
+ </method>
+ <method name="getInputFormat" return="org.apache.hadoop.mapred.InputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link InputFormat} implementation for the map-reduce job,
+ defaults to {@link TextInputFormat} if not specified explicitly.
+
+ @return the {@link InputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link InputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link InputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="getOutputFormat" return="org.apache.hadoop.mapred.OutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link OutputFormat} implementation for the map-reduce job,
+ defaults to {@link TextOutputFormat} if not specified explicitly.
+
+ @return the {@link OutputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getOutputCommitter" return="org.apache.hadoop.mapred.OutputCommitter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link OutputCommitter} implementation for the map-reduce job,
+ defaults to {@link FileOutputCommitter} if not specified explicitly.
+
+ @return the {@link OutputCommitter} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setOutputCommitter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputCommitter&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link OutputCommitter} implementation for the map-reduce job.
+
+ @param theClass the {@link OutputCommitter} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="setOutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link OutputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link OutputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="setCompressMapOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Should the map outputs be compressed before transfer?
+ Uses the SequenceFile compression.
+
+ @param compress should the map outputs be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressMapOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Are the outputs of the maps to be compressed?
+
+ @return <code>true</code> if the outputs of the maps are to be compressed,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the given class as the {@link CompressionCodec} for the map outputs.
+
+ @param codecClass the {@link CompressionCodec} class that will compress
+ the map outputs.]]>
+ </doc>
+ </method>
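+ <!-- A minimal sketch of enabling map-output compression, assuming the
+      built-in GzipCodec is acceptable for the intermediate data:
+        JobConf conf = new JobConf();
+        conf.setCompressMapOutput(true);
+        conf.setMapOutputCompressorClass(org.apache.hadoop.io.compress.GzipCodec.class);
+ -->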
+ <method name="getMapOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the map outputs.
+
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} class that should be used to compress the
+ map outputs.
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getMapOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the map output data. If it is not set, use the
+ (final) output key class. This allows the map output key class to be
+ different than the final output key class.
+
+ @return the map output key class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the map output data. This allows the user to
+ specify the map output key class to be different than the final output
+ key class.
+
+ @param theClass the map output key class.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for the map output data. If it is not set, use the
+ (final) output value class. This allows the map output value class to be
+ different than the final output value class.
+
+ @return the map output value class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for the map output data. This allows the user to
+ specify the map output value class to be different than the final output
+ value class.
+
+ @param theClass the map output value class.]]>
+ </doc>
+ </method>
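+ <!-- A minimal sketch showing intermediate (map output) types that differ from
+      the final job output types; the type choices are illustrative:
+        JobConf conf = new JobConf();
+        conf.setMapOutputKeyClass(org.apache.hadoop.io.Text.class);
+        conf.setMapOutputValueClass(org.apache.hadoop.io.IntWritable.class);
+        conf.setOutputKeyClass(org.apache.hadoop.io.Text.class);
+        conf.setOutputValueClass(org.apache.hadoop.io.LongWritable.class);
+ -->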
+ <method name="getOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the job output data.
+
+ @return the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the job output data.
+
+ @param theClass the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link RawComparator} comparator used to compare keys.
+
+ @return the {@link RawComparator} comparator used to compare keys.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyComparatorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link RawComparator} comparator used to compare keys.
+
+ @param theClass the {@link RawComparator} comparator used to
+ compare keys.
+ @see #setOutputValueGroupingComparator(Class)]]>
+ </doc>
+ </method>
+ <method name="setKeyFieldComparatorOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keySpec" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the {@link KeyFieldBasedComparator} options used to compare keys.
+
+ @param keySpec the key specification of the form -k pos1[,pos2], where,
+ pos is of the form f[.c][opts], where f is the number
+ of the key field to use, and c is the number of the first character from
+ the beginning of the field. Fields and character positions are numbered
+ starting with 1; a character position of zero in pos2 indicates the
+ field's last character. If '.c' is omitted from pos1, it defaults to 1
+ (the beginning of the field); if omitted from pos2, it defaults to 0
+ (the end of the field). opts are ordering options. The supported options
+ are:
+ -n, (Sort numerically)
+ -r, (Reverse the result of comparison)]]>
+ </doc>
+ </method>
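+ <!-- A minimal sketch of the -k key specification described above; the field
+      numbers are illustrative. This sorts on the second field, numerically
+      and in reverse:
+        JobConf conf = new JobConf();
+        conf.setOutputKeyComparatorClass(
+            org.apache.hadoop.mapred.lib.KeyFieldBasedComparator.class);
+        conf.setKeyFieldComparatorOptions("-k2,2nr");
+ -->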
+ <method name="getKeyFieldComparatorOption" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link KeyFieldBasedComparator} options]]>
+ </doc>
+ </method>
+ <method name="setKeyFieldPartitionerOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keySpec" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the {@link KeyFieldBasedPartitioner} options used for
+ {@link Partitioner}
+
+ @param keySpec the key specification of the form -k pos1[,pos2], where,
+ pos is of the form f[.c][opts], where f is the number
+ of the key field to use, and c is the number of the first character from
+ the beginning of the field. Fields and character positions are numbered
+ starting with 1; a character position of zero in pos2 indicates the
+ field's last character. If '.c' is omitted from pos1, it defaults to 1
+ (the beginning of the field); if omitted from pos2, it defaults to 0
+ (the end of the field).]]>
+ </doc>
+ </method>
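+ <!-- A minimal sketch pairing the option string with the partitioner it
+      configures; partitioning here is on the first key field only:
+        JobConf conf = new JobConf();
+        conf.setPartitionerClass(
+            org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner.class);
+        conf.setKeyFieldPartitionerOptions("-k1,1");
+ -->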
+ <method name="getKeyFieldPartitionerOption" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link KeyFieldBasedPartitioner} options]]>
+ </doc>
+ </method>
+ <method name="getOutputValueGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user defined {@link WritableComparable} comparator for
+ grouping keys of inputs to the reduce.
+
+ @return comparator set by the user for grouping values.
+ @see #setOutputValueGroupingComparator(Class) for details.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueGroupingComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the user defined {@link RawComparator} comparator for
+ grouping keys in the input to the reduce.
+
+ <p>This comparator should be provided if the equivalence rules for keys
+ for sorting the intermediates are different from those for grouping keys
+ before each call to
+ {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
+
+ <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
+ in a single call to the reduce function if K1 and K2 compare as equal.</p>
+
+ <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
+ how keys are sorted, this can be used in conjunction to simulate
+ <i>secondary sort on values</i>.</p>
+
+ <p><i>Note</i>: This is not a guarantee of the reduce sort being
+ <i>stable</i> in any sense. (In any case, with the order of available
+ map-outputs to the reduce being non-deterministic, it wouldn't make
+ that much sense.)</p>
+
+ @param theClass the comparator class to be used for grouping keys.
+ It should implement <code>RawComparator</code>.
+ @see #setOutputKeyComparatorClass(Class)]]>
+ </doc>
+ </method>
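+ <!-- A minimal sketch of the secondary-sort pattern described above;
+      SortComparator and GroupComparator are hypothetical user classes
+      implementing RawComparator (sort on the full key, group on a prefix):
+        JobConf conf = new JobConf();
+        conf.setOutputKeyComparatorClass(SortComparator.class);
+        conf.setOutputValueGroupingComparator(GroupComparator.class);
+ -->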
+ <method name="getOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for job outputs.
+
+ @return the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for job outputs.
+
+ @param theClass the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapperClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Mapper} class for the job.
+
+ @return the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapperClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Mapper} class for the job.
+
+ @param theClass the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getMapRunnerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link MapRunnable} class for the job.
+
+ @return the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapRunnerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"/>
+ <doc>
+ <![CDATA[Expert: Set the {@link MapRunnable} class for the job.
+
+ Typically used to exert greater control on {@link Mapper}s.
+
+ @param theClass the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getPartitionerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Partitioner} used to partition {@link Mapper}-outputs
+ to be sent to the {@link Reducer}s.
+
+ @return the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setPartitionerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Partitioner} class used to partition
+ {@link Mapper}-outputs to be sent to the {@link Reducer}s.
+
+ @param theClass the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getReducerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Reducer} class for the job.
+
+ @return the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setReducerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Reducer} class for the job.
+
+ @param theClass the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getCombinerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers. Typically the combiner is the same as
+ the {@link Reducer} for the job, i.e. {@link #getReducerClass()}.
+
+ @return the user-defined combiner class used to combine map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCombinerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers.
+
+ <p>The combiner is a task-level aggregation operation which, in some cases,
+ helps to cut down the amount of data transferred from the {@link Mapper} to
+ the {@link Reducer}, leading to better performance.</p>
+
+ <p>Typically the combiner is the same as the <code>Reducer</code> for the
+ job, i.e. {@link #setReducerClass(Class)}.</p>
+
+ @param theClass the user-defined combiner class used to combine
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be used for this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on, else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getMapSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for map tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be
+ used for this job for map tasks,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for map tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for map tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getReduceSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for reduce tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be used
+ for reduce tasks for this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setReduceSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for reduce tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for reduce tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
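+ <!-- A minimal sketch of per-phase control, assuming reduce tasks have side
+      effects that make speculative duplicates undesirable:
+        JobConf conf = new JobConf();
+        conf.setMapSpeculativeExecution(true);
+        conf.setReduceSpeculativeExecution(false);
+ -->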
+ <method name="getNumMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of map tasks for this job.
+ Defaults to <code>1</code>.
+
+ @return the number of map tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumMapTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the number of map tasks for this job.
+
+ <p><i>Note</i>: This is only a <i>hint</i> to the framework. The actual
+ number of spawned map tasks depends on the number of {@link InputSplit}s
+ generated by the job's {@link InputFormat#getSplits(JobConf, int)}.
+
+ A custom {@link InputFormat} is typically used to accurately control
+ the number of map tasks for the job.</p>
+
+ <h4 id="NoOfMaps">How many maps?</h4>
+
+ <p>The number of maps is usually driven by the total size of the inputs
+ i.e. total number of blocks of the input files.</p>
+
+ <p>The right level of parallelism for maps seems to be around 10-100 maps
+ per-node, although it has been set up to 300 or so for very cpu-light map
+ tasks. Task setup takes a while, so it is best if the maps take at least a
+ minute to execute.</p>
+
+ <p>The default behavior of file-based {@link InputFormat}s is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of input files. However, the {@link FileSystem} blocksize of the
+ input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB,
+ you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is
+ used to set it even higher.</p>
+
+ @param n the number of map tasks for this job.
+ @see InputFormat#getSplits(JobConf, int)
+ @see FileInputFormat
+ @see FileSystem#getDefaultBlockSize()
+ @see FileStatus#getBlockSize()]]>
+ </doc>
+ </method>
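+ <!-- A worked instance of the sizing rule above, with illustrative numbers:
+      10 TB of input at a 128 MB block size yields roughly
+      10 * 1024 * 1024 / 128 = 81,920 (~82,000) splits, hence that many map
+      tasks unless setNumMapTasks(int) requests more:
+        JobConf conf = new JobConf();
+        conf.setNumMapTasks(82000); // only a hint to the framework
+ -->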
+ <method name="getNumReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of reduce tasks for this job. Defaults to
+ <code>1</code>.
+
+ @return the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumReduceTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the requisite number of reduce tasks for this job.
+
+ <h4 id="NoOfReduces">How many reduces?</h4>
+
+ <p>The right number of reduces seems to be <code>0.95</code> or
+ <code>1.75</code> multiplied by (&lt;<i>no. of nodes</i>&gt; *
+ <a href="{@docRoot}/../hadoop-default.html#mapred.tasktracker.reduce.tasks.maximum">
+ mapred.tasktracker.reduce.tasks.maximum</a>).
+ </p>
+
+ <p>With <code>0.95</code> all of the reduces can launch immediately and
+ start transferring map outputs as the maps finish. With <code>1.75</code>
+ the faster nodes will finish their first round of reduces and launch a
+ second wave of reduces doing a much better job of load balancing.</p>
+
+ <p>Increasing the number of reduces increases the framework overhead, but
+ improves load balancing and lowers the cost of failures.</p>
+
+ <p>The scaling factors above are slightly less than whole numbers to
+ reserve a few reduce slots in the framework for speculative-tasks, failures
+ etc.</p>
+
+ <h4 id="ReducerNone">Reducer NONE</h4>
+
+ <p>It is legal to set the number of reduce-tasks to <code>zero</code>.</p>
+
+ <p>In this case the outputs of the map-tasks go directly to the distributed
+ file-system, to the path set by
+ {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the
+ framework doesn't sort the map-outputs before writing them out to HDFS.</p>
+
+ @param n the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
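+ <!-- A worked instance of the 0.95 heuristic above, with illustrative cluster
+      numbers (20 nodes, 2 reduce slots per tasktracker):
+        int nodes = 20;
+        int slotsPerNode = 2; // mapred.tasktracker.reduce.tasks.maximum
+        JobConf conf = new JobConf();
+        conf.setNumReduceTasks((int) (0.95 * nodes * slotsPerNode)); // 38 reduces
+ -->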
+ <method name="getMaxMapAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ map task, as specified by the <code>mapred.map.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ map task.
+
+ @param n the number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ reduce task, as specified by the <code>mapred.reduce.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ reduce task.
+
+ @param n the number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name. This is only used to identify the
+ job to the user.
+
+ @return the job's name, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified job name.
+
+ @param name the job's new name.]]>
+ </doc>
+ </method>
+ <method name="getSessionId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified session identifier. The default is the empty string.
+
+ The session identifier is used to tag metric data that is reported to some
+ performance metrics system via the org.apache.hadoop.metrics API. The
+ session identifier is intended, in particular, for use by Hadoop-On-Demand
+ (HOD) which allocates a virtual Hadoop cluster dynamically and transiently.
+ HOD will set the session identifier by modifying the hadoop-site.xml file
+ before starting the cluster.
+
+ When not running under HOD, this identifier is expected to remain set to
+ the empty string.
+
+ @return the session identifier, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setSessionId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sessionId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified session identifier.
+
+ @param sessionId the new session id.]]>
+ </doc>
+ </method>
+ <method name="setMaxTaskFailuresPerTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="noFailures" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds <code>noFailures</code>, the
+ tasktracker is <i>blacklisted</i> for this job.
+
+ @param noFailures maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxTaskFailuresPerTracker" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Expert: Get the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds this, the tasktracker is
+ <i>blacklisted</i> for this job.
+
+ @return the maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of map tasks that can fail without
+ the job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed map-task results in
+ the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the maximum percentage of map tasks that can fail without the
+ job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts} attempts
+ before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of reduce tasks that can fail without
+ the job being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed reduce-task results
+ in the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum percentage of reduce tasks that can fail without the job
+ being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
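+ <!-- A minimal sketch of tolerating a small fraction of task failures, with
+      illustrative percentages; 0 (the default) aborts the job on any failure:
+        JobConf conf = new JobConf();
+        conf.setMaxMapTaskFailuresPercent(5);
+        conf.setMaxReduceTaskFailuresPercent(0);
+ -->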
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="prio" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Set {@link JobPriority} for this job.
+
+ @param prio the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link JobPriority} for this job.
+
+ @return the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getProfileEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get whether the task profiling is enabled.
+ @return true if some tasks will be profiled]]>
+ </doc>
+ </method>
+ <method name="setProfileEnabled"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the system should collect profiler information for some of
+ the tasks in this job. The information is stored in the user log
+ directory.
+ @param newValue true means it should be gathered]]>
+ </doc>
+ </method>
+ <method name="getProfileParams" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the profiler configuration arguments.
+
+ The default value for this property is
+ "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
+
+ @return the parameters to pass to the task child to configure profiling]]>
+ </doc>
+ </method>
+ <method name="setProfileParams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the profiler configuration arguments. If the string contains a '%s' it
+ will be replaced with the name of the profiling output file when the task
+ runs.
+
+ This value is passed to the task child JVM on the command line.
+
+ @param value the configuration string]]>
+ </doc>
+ </method>
+ <method name="getProfileTaskRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <doc>
+ <![CDATA[Get the range of maps or reduces to profile.
+ @param isMap is the task a map?
+ @return the task ranges]]>
+ </doc>
+ </method>
+ <method name="setProfileTaskRange"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <param name="newValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the ranges of maps or reduces to profile. setProfileEnabled(true)
+ must also be called.
+ @param newValue a set of integer ranges of the map ids]]>
+ </doc>
+ </method>
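+ <!-- A minimal sketch of profiling a few early tasks; the task-id range is
+      illustrative and setProfileEnabled(true) is required:
+        JobConf conf = new JobConf();
+        conf.setProfileEnabled(true);
+        conf.setProfileTaskRange(true, "0-2");  // profile map tasks 0 to 2
+        conf.setProfileTaskRange(false, "0-2"); // profile reduce tasks 0 to 2
+ -->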
+ <method name="setMapDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the map tasks fail.
+
+ <p>The debug script can aid debugging of failed map tasks. The script is
+ given task's stdout, stderr, syslog, jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the map failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script needs to be symlinked. </p>
+
+ <p>Here is an example of how to submit a script:</p>
+ <p><blockquote><pre>
+ job.setMapDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param mDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getMapDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the map task's debug script.
+
+ @return the debug Script for the mapred job for failed map tasks.
+ @see #setMapDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="setReduceDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the reduce tasks fail.
+
+ <p>The debug script can aid debugging of failed reduce tasks. The script
+ is given task's stdout, stderr, syslog, jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the reduce failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script file needs to be symlinked.</p>
+
+ <p>Here is an example of how to submit a script:</p>
+ <p><blockquote><pre>
+ job.setReduceDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param rDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getReduceDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reduce task's debug script.
+
+ @return the debug script for the mapred job for failed reduce tasks.
+ @see #setReduceDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="getJobEndNotificationURI" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the URI to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ @return the job end notification uri, <code>null</code> if it hasn't
+ been set.
+ @see #setJobEndNotificationURI(String)]]>
+ </doc>
+ </method>
+ <method name="setJobEndNotificationURI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the URI to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ <p>The uri can contain 2 special parameters: <tt>$jobId</tt> and
+ <tt>$jobStatus</tt>. Those, if present, are replaced by the job's
+ identifier and completion-status respectively.</p>
+
+ <p>This is typically used by application-writers to implement chaining of
+ Map-Reduce jobs in an <i>asynchronous manner</i>.</p>
+
+ @param uri the job end notification uri
+ @see JobStatus
+ @see <a href="{@docRoot}/org/apache/hadoop/mapred/JobClient.html#JobCompletionAndChaining">Job Completion and Chaining</a>]]>
+ </doc>
+ </method>
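+ <!-- A minimal sketch of the substitution described above; the host and path
+      are hypothetical. $jobId and $jobStatus are replaced by the framework
+      when the notification is sent:
+        JobConf conf = new JobConf();
+        conf.setJobEndNotificationURI(
+            "http://myhost:8080/jobdone?id=$jobId&status=$jobStatus");
+ -->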
+ <method name="getJobLocalDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job-specific shared directory for use as scratch space.
+
+ <p>
+ When a job starts, a shared directory is created at
+ <code>${mapred.local.dir}/taskTracker/jobcache/$jobid/work/</code>.
+ This directory is exposed to the users through
+ <code>job.local.dir</code>, so the tasks can use this space
+ as scratch space and share files among them.</p>
+ This value is also available as a system property.
+
+ @return the localized job-specific shared directory]]>
+ </doc>
+ </method>
+ <method name="getQueueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the name of the queue to which this job is submitted.
+ Defaults to 'default'.
+
+ @return name of the queue]]>
+ </doc>
+ </method>
+ <method name="setQueueName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the name of the queue to which this job should be submitted.
+
+ @param queueName Name of the queue]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_QUEUE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Name of the queue to which jobs will be submitted, if no queue
+ name is mentioned.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A map/reduce job configuration.
+
+ <p><code>JobConf</code> is the primary interface for a user to describe a
+ map-reduce job to the Hadoop framework for execution. The framework tries to
+ faithfully execute the job as described by <code>JobConf</code>; however:
+ <ol>
+ <li>
+ Some configuration parameters might have been marked as
+ <a href="{@docRoot}/org/apache/hadoop/conf/Configuration.html#FinalParams">
+ final</a> by administrators and hence cannot be altered.
+ </li>
+ <li>
+ While some job parameters are straightforward to set
+ (e.g. {@link #setNumReduceTasks(int)}), some parameters interact subtly with the
+ rest of the framework and/or the job configuration and are relatively more
+ complex for the user to control finely (e.g. {@link #setNumMapTasks(int)}).
+ </li>
+ </ol></p>
+
+ <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner
+ (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and
+ {@link OutputFormat} implementations to be used, etc.</p>
+
+ <p>Optionally <code>JobConf</code> is used to specify other advanced facets
+ of the job such as <code>Comparator</code>s to be used, files to be put in
+ the {@link DistributedCache}, whether or not intermediate and/or job outputs
+ are to be compressed (and how), debuggability via user-provided scripts
+ ({@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)})
+ for post-processing task logs, the task's stdout, stderr, syslog,
+ etc.</p>
+
+ <p>Here is an example on how to configure a job via <code>JobConf</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ FileInputFormat.setInputPaths(job, new Path("in"));
+ FileOutputFormat.setOutputPath(job, new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setCombinerClass(MyJob.MyReducer.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ job.setInputFormat(SequenceFileInputFormat.class);
+ job.setOutputFormat(SequenceFileOutputFormat.class);
+ </pre></blockquote></p>
+
+ @see JobClient
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobConf -->
+ <!-- start interface org.apache.hadoop.mapred.JobConfigurable -->
+ <interface name="JobConfigurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Initializes a new instance from a {@link JobConf}.
+
+ @param job the configuration]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link JobConf}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobConfigurable -->
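+ <!-- A minimal sketch of a JobConfigurable implementation; the class name and
+      configuration key are hypothetical:
+        public class MyMapperBase implements JobConfigurable {
+          private int threshold;
+          public void configure(JobConf job) {
+            threshold = job.getInt("myjob.threshold", 10); // read a job parameter
+          }
+        }
+ -->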
+ <!-- start class org.apache.hadoop.mapred.JobContext -->
+ <class name="JobContext" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job Configuration
+
+ @return JobConf]]>
+ </doc>
+ </method>
+ <method name="getProgressible" return="org.apache.hadoop.util.Progressable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the progress mechanism for reporting progress.
+
+ @return progress mechanism]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobContext -->
+ <!-- start class org.apache.hadoop.mapred.JobEndNotifier -->
+ <class name="JobEndNotifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobEndNotifier"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="startNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="stopNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="registerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ <method name="localRunnerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobEndNotifier -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory -->
+ <class name="JobHistory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="jobTrackerStartTime" type="long"/>
+ <doc>
+ <![CDATA[Initialize JobHistory files.
+ @param conf Jobconf of the job tracker.
+ @param hostname jobtracker's hostname
+ @param jobTrackerStartTime jobtracker's start time
+ @return true if initialized properly,
+ false otherwise]]>
+ </doc>
+ </method>
+ <method name="parseHistoryFromFS"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="l" type="org.apache.hadoop.mapred.JobHistory.Listener"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parses a history file and invokes Listener.handle() for
+ each line of history. It can be used to look through history
+ files for specific items without having to keep the whole history in memory.
+ @param path path to history file
+ @param l Listener for history events
+ @param fs FileSystem where history file is present
+ @throws IOException]]>
+ </doc>
+ </method>
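+ <!-- A minimal sketch of the listener-based parsing described above;
+      historyFile and conf are hypothetical, and the nested
+      JobHistory.Listener callback is assumed:
+        JobHistory.parseHistoryFromFS(historyFile, new JobHistory.Listener() {
+          public void handle(JobHistory.RecordTypes recType,
+                             Map<JobHistory.Keys, String> values) throws IOException {
+            if (recType == JobHistory.RecordTypes.Job) {
+              System.out.println(values.get(JobHistory.Keys.JOBID)); // one job record
+            }
+          }
+        }, FileSystem.get(conf));
+ -->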
+ <method name="isDisableHistory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the history disable status. By default history is enabled, so this
+ method returns false.
+ @return true if history logging is disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="setDisableHistory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="disableHistory" type="boolean"/>
+ <doc>
+ <![CDATA[Enable/disable history logging. The default value is false, so history
+ is enabled by default.
+ @param disableHistory true if history should be disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getTaskLogsUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attempt" type="org.apache.hadoop.mapred.JobHistory.TaskAttempt"/>
+ <doc>
+ <![CDATA[Return the TaskLogsUrl of a particular TaskAttempt
+
+ @param attempt
+ @return the taskLogsUrl, or null if the http-port, tracker-name, or
+ task-attempt-id is unavailable.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JOB_NAME_TRIM_LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provides methods for writing to and reading from job history.
+ Job History works in an append mode, JobHistory and its inner classes provide methods
+ to log job events.
+
+ JobHistory is split into multiple files, format of each file is plain text where each line
+ is of the format [type (key=value)*], where type identifies the type of the record.
+ Type maps to UID of one of the inner classes of this class.
+
+ Job history is maintained in a master index which contains start/stop times of all jobs with
+ a few other job-level properties. Apart from this, each job's history is maintained in a separate history
+ file. The names of job history files follow the format jobtrackerId_jobid.
+
+ For parsing the job history it supports a listener-based interface where each line is parsed
+ and passed to the listener. The listener can create an object model of the history or look for specific
+ events and discard the rest of the history.
+
+ CHANGE LOG :
+ Version 0 : The history has the following format :
+ TAG KEY1="VALUE1" KEY2="VALUE2" and so on.
+ TAG can be Job, Task, MapAttempt or ReduceAttempt.
+ Note that a '"' is the line delimiter.
+ Version 1 : Changes the line delimiter to '.'
+ Values are now escaped for unambiguous parsing.
+ Added the Meta tag to store version info.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <class name="JobHistory.HistoryCleaner" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobHistory.HistoryCleaner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Cleans up history data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Deletes history files older than one month, updates the master index, and removes all
+ jobs older than one month. Also, if a job tracker has had no jobs in the last month,
+ removes the reference to that job tracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <class name="JobHistory.JobInfo" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.JobInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create new JobInfo]]>
+ </doc>
+ </constructor>
+ <method name="getAllTasks" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.Task&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all map and reduce tasks as a &lt;taskid, Task&gt; map.]]>
+ </doc>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Get the path of the locally stored job file
+ @param jobId id of the job
+ @return the path of the job file on the local file system]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFile" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the path of the job-history
+ log file.
+
+ @param logFile path of the job-history file
+ @return URL encoded path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL encoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="decodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to decode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL decoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the user name from the job conf]]>
+ </doc>
+ </method>
+ <method name="getJobHistoryLogLocation" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the job history file path given the history filename]]>
+ </doc>
+ </method>
+ <method name="getJobHistoryLogLocationForUser" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the user job history file path]]>
+ </doc>
+ </method>
+ <method name="getJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="id" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recover the job history filename from the history folder.
+ Uses the following pattern
+ $jt-hostname_[0-9]*_$job-id_$user-$job-name*
+ @param jobConf the job conf
+ @param id job id]]>
+ </doc>
+ </method>
+ <method name="recoverJobHistoryFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="logFilePath" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Selects one of the two files generated as a part of recovery.
+ The rule of thumb is to always select the oldest file.
+ This call makes sure that only one file is left in the end.
+ @param conf job conf
+ @param logFilePath Path of the log file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logSubmitted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="jobConfPath" type="java.lang.String"/>
+ <param name="submitTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Log job submitted event to history. Creates a new file in history
+        for the job. If history file creation fails, history is disabled
+        for all subsequent events.
+ @param jobId job id assigned by job tracker.
+ @param jobConf job conf of the job
+ @param jobConfPath path to job conf xml file in HDFS.
+ @param submitTime time when job tracker received the job
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logInited"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs launch time of job.
+
+ @param jobId job id, assigned by jobtracker.
+ @param startTime start time of job.
+ @param totalMaps total maps assigned by jobtracker.
+ @param totalReduces total reduces.]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link #logInited(JobID, long, int, int)} and
+ {@link #logStarted(JobID)}">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs the job as RUNNING.
+
+ @param jobId job id, assigned by jobtracker.
+ @param startTime start time of job.
+ @param totalMaps total maps assigned by jobtracker.
+ @param totalReduces total reduces.
+ @deprecated Use {@link #logInited(JobID, long, int, int)} and
+ {@link #logStarted(JobID)}]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+        <![CDATA[Logs the job as RUNNING.
+ @param jobId job id, assigned by jobtracker.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="finishTime" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <param name="failedMaps" type="int"/>
+ <param name="failedReduces" type="int"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+        <![CDATA[Log job finished event. Closes the job file in history.
+ @param jobId job id, assigned by jobtracker.
+ @param finishTime finish time of job in ms.
+ @param finishedMaps no of maps successfully finished.
+        @param finishedReduces no of reduces finished successfully.
+ @param failedMaps no of failed map tasks.
+ @param failedReduces no of failed reduce tasks.
+ @param counters the counters from the job]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs job failed event. Closes the job history log file.
+ @param jobid job id
+ @param timestamp time when job failure was detected in ms.
+        @param finishedMaps no of finished map tasks.
+ @param finishedReduces no of finished reduce tasks.]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs job killed event. Closes the job history log file.
+
+        @param jobid job id
+        @param timestamp time when the job kill was issued, in ms.
+        @param finishedMaps no of finished map tasks.
+        @param finishedReduces no of finished reduce tasks.]]>
+ </doc>
+ </method>
+ <method name="logJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="priority" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Log job's priority.
+ @param jobid job id
+        @param priority Job's priority]]>
+ </doc>
+ </method>
+ <method name="logJobInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="submitTime" type="long"/>
+ <param name="launchTime" type="long"/>
+ <param name="restartCount" type="int"/>
+ <doc>
+ <![CDATA[Log job's submit-time/launch-time
+ @param jobid job id
+ @param submitTime job's submit time
+ @param launchTime job's launch time
+ @param restartCount number of times the job got restarted]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to job start, finish or failure.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.JobInfo -->
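+ <!-- An illustrative sketch of driving the static JobInfo logging methods above
+      over a job's lifecycle. The job id, conf path and task counts are
+      hypothetical, job history is assumed to have been initialized beforehand,
+      and an empty Counters is used only as a placeholder.
+
+      import java.io.IOException;
+      import org.apache.hadoop.mapred.Counters;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.JobHistory;
+      import org.apache.hadoop.mapred.JobID;
+
+      public class JobInfoLoggingSketch {
+        public static void main(String[] args) throws IOException {
+          JobConf conf = new JobConf();
+          JobID id = new JobID("200707121733", 3);  // jtIdentifier, job number
+
+          // Creates the per-job history file; on failure, history is disabled.
+          JobHistory.JobInfo.logSubmitted(id, conf, "/jobs/conf.xml",
+              System.currentTimeMillis());
+
+          // Record launch (init), then mark the job RUNNING.
+          JobHistory.JobInfo.logInited(id, System.currentTimeMillis(), 10, 2);
+          JobHistory.JobInfo.logStarted(id);
+
+          // On completion, close the history file with the final counts.
+          JobHistory.JobInfo.logFinished(id, System.currentTimeMillis(),
+              10, 2, 0, 0, new Counters());
+        }
+      }
+ -->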
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <class name="JobHistory.Keys" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Keys&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Keys[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Keys"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Job history files contain key="value" pairs, where keys belong to this enum.
+ It acts as a global namespace for all keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <!-- start interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <interface name="JobHistory.Listener" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="handle"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recType" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"/>
+ <param name="values" type="java.util.Map&lt;org.apache.hadoop.mapred.JobHistory.Keys, java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Callback method for history parser.
+ @param recType type of record, which is the first entry in the line.
+        @param values a map of key-value pairs as they appear in history.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Callback interface for reading back log events from JobHistory. This interface
+ should be implemented and passed to JobHistory.parseHistory()]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobHistory.Listener -->
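+ <!-- A minimal sketch of the Listener callback above: an implementation that
+      prints each parsed record. Per the class doc, an instance would be passed
+      to JobHistory.parseHistory(); that method's signature is outside this
+      excerpt, so only the listener itself is shown.
+
+      import java.io.IOException;
+      import java.util.Map;
+      import org.apache.hadoop.mapred.JobHistory;
+
+      public class PrintingListener implements JobHistory.Listener {
+        // Called once per history line; recType is the line's first token.
+        public void handle(JobHistory.RecordTypes recType,
+                           Map<JobHistory.Keys, String> values) throws IOException {
+          System.out.println(recType + " : " + values);
+        }
+      }
+ -->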
+ <!-- start class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
+ <class name="JobHistory.MapAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.MapAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of this map task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time of task attempt as reported by task tracker.
+ @param hostName host name of the task attempt.
+ @deprecated Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="trackerName" type="java.lang.String"/>
+ <param name="httpPort" type="int"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of this map task attempt.
+
+ @param taskAttemptId task attempt id
+ @param startTime start time of task attempt as reported by task tracker.
+ @param trackerName name of the tracker executing the task attempt.
+ @param httpPort http port of the task tracker executing the task attempt
+ @param taskType Whether the attempt is cleanup or setup or map]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFinished(TaskAttemptID, long, String, String, String, Counters)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finish time of map task attempt.
+ @param taskAttemptId task attempt id
+ @param finishTime finish time
+ @param hostName host name
+ @deprecated Use
+ {@link #logFinished(TaskAttemptID, long, String, String, String, Counters)}]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="stateString" type="java.lang.String"/>
+ <param name="counter" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finish time of map task attempt.
+
+ @param taskAttemptId task attempt id
+ @param finishTime finish time
+ @param hostName host name
+ @param taskType Whether the attempt is cleanup or setup or map
+ @param stateString state string of the task attempt
+ @param counter counters of the task attempt]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt failed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @deprecated Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt failed event.
+
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @param taskType Whether the attempt is cleanup or setup or map]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt killed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @deprecated Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt killed event.
+
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @param taskType Whether the attempt is cleanup or setup or map]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+ a Map Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
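+ <!-- A hypothetical sketch of logging one map attempt with the non-deprecated
+      overloads above. The attempt id, tracker name and port are made up, and
+      "MAP" mirrors the taskType strings described in the parameter docs.
+
+      import org.apache.hadoop.mapred.Counters;
+      import org.apache.hadoop.mapred.JobHistory;
+      import org.apache.hadoop.mapred.TaskAttemptID;
+
+      public class MapAttemptLoggingSketch {
+        public static void main(String[] args) {
+          TaskAttemptID attempt =
+              TaskAttemptID.forName("attempt_200707121733_0003_m_000001_0");
+          long now = System.currentTimeMillis();
+
+          // Start: tracker name, its http port, and the attempt type.
+          JobHistory.MapAttempt.logStarted(attempt, now,
+              "tracker_host1:50060", 50060, "MAP");
+
+          // Finish: host, type, state string and the attempt's counters.
+          JobHistory.MapAttempt.logFinished(attempt, now + 1000L, "host1",
+              "MAP", "map done", new Counters());
+        }
+      }
+ -->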
+ <!-- start class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <class name="JobHistory.RecordTypes" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.RecordTypes&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.RecordTypes[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Record types are identifiers for each line of log in history files.
+ A record type appears as the first token in a single line of log.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
+ <class name="JobHistory.ReduceAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.ReduceAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of Reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time
+ @param hostName host name
+ @deprecated Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="trackerName" type="java.lang.String"/>
+ <param name="httpPort" type="int"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of Reduce task attempt.
+
+ @param taskAttemptId task attempt id
+ @param startTime start time
+ @param trackerName tracker name
+ @param httpPort the http port of the tracker executing the task attempt
+ @param taskType Whether the attempt is cleanup or setup or reduce]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFinished(TaskAttemptID, long, long, long, String, String, String, Counters)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finished event of this task.
+ @param taskAttemptId task attempt id
+ @param shuffleFinished shuffle finish time
+ @param sortFinished sort finish time
+ @param finishTime finish time of task
+ @param hostName host name where task attempt executed
+ @deprecated Use
+ {@link #logFinished(TaskAttemptID, long, long, long, String, String, String, Counters)}]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="stateString" type="java.lang.String"/>
+ <param name="counter" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finished event of this task.
+
+ @param taskAttemptId task attempt id
+ @param shuffleFinished shuffle finish time
+ @param sortFinished sort finish time
+ @param finishTime finish time of task
+ @param hostName host name where task attempt executed
+ @param taskType Whether the attempt is cleanup or setup or reduce
+ @param stateString the state string of the attempt
+ @param counter counters of the attempt]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log failed reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task failed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @deprecated Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log failed reduce task attempt.
+
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task failed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @param taskType Whether the attempt is cleanup or setup or reduce]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log killed reduce task attempt.
+ @param taskAttemptId task attempt id
+        @param timestamp time stamp when the task was killed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @deprecated Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log killed reduce task attempt.
+
+ @param taskAttemptId task attempt id
+        @param timestamp time stamp when the task was killed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @param taskType Whether the attempt is cleanup or setup or reduce]]>
+ </doc>
+ </method>
+ <doc>
+        <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+        a Reduce Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Task -->
+ <class name="JobHistory.Task" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.Task"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="splitLocations" type="java.lang.String"/>
+ <doc>
+        <![CDATA[Log start time of task (TIP).
+        @param taskId task id
+        @param taskType MAP or REDUCE
+        @param startTime start time of the tip.
+        @param splitLocations split locations of the task]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finish time of task.
+ @param taskId task id
+ @param taskType MAP or REDUCE
+        @param finishTime finish time of task in ms
+        @param counters counters of the task]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+        <![CDATA[Log task failed event.
+        @param taskId task id
+        @param taskType MAP or REDUCE.
+        @param time timestamp when the task failure was detected.
+        @param error error message for the failure.]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="failedDueToAttempt" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[@param failedDueToAttempt The attempt that caused the failure, if any]]>
+ </doc>
+ </method>
+ <method name="getTaskAttempts" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.TaskAttempt&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all task attempts for this task. <task attempt id - TaskAttempt>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to Task's start, finish or failure.
+ All events logged by this class are logged in a separate file per job in
+ job tracker history. These events map to TIPs in jobtracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Task -->
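+ <!-- A short sketch of TIP-level logging with the Task helpers above; the task
+      id and split locations are hypothetical.
+
+      import org.apache.hadoop.mapred.Counters;
+      import org.apache.hadoop.mapred.JobHistory;
+      import org.apache.hadoop.mapred.TaskID;
+
+      public class TaskLoggingSketch {
+        public static void main(String[] args) {
+          TaskID tip = TaskID.forName("task_200707121733_0003_m_000001");
+          long start = System.currentTimeMillis();
+
+          JobHistory.Task.logStarted(tip, "MAP", start, "/default-rack/host1");
+          JobHistory.Task.logFinished(tip, "MAP", start + 5000L, new Counters());
+        }
+      }
+ -->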
+ <!-- start class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <class name="JobHistory.TaskAttempt" extends="org.apache.hadoop.mapred.JobHistory.Task"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.TaskAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Base class for Map and Reduce TaskAttempts.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Values -->
+ <class name="JobHistory.Values" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Values&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Values[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Values"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+        <![CDATA[This enum contains some of the values commonly used by history log events.
+        Since values in history can only be strings, Values.name() is used in
+        most places in the history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Values -->
+ <!-- start class org.apache.hadoop.mapred.JobID -->
+ <class name="JobID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobID" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a JobID object
+ @param jtIdentifier jobTracker identifier
+ @param id job number]]>
+ </doc>
+ </constructor>
+ <method name="getJtIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+        <![CDATA[Compare JobIDs first by jtIdentifier, then by job number]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+        <![CDATA[Construct a JobID object from a given string
+        @return constructed JobID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getJobIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <doc>
+        <![CDATA[Returns a regex pattern which matches job IDs. Arguments can
+        be given null, in which case that part of the regex will be generic.
+        For example, to obtain a regex matching <i>any job</i>
+        run on the jobtracker started at <i>200707121733</i>, we would use :
+        <pre>
+        JobID.getJobIDsPattern("200707121733", null);
+ </pre>
+ which will return :
+ <pre> "job_200707121733_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @return a regex pattern matching JobIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[JobID represents the immutable and unique identifier for
+        the job. A JobID consists of two parts. The first part
+        is the jobtracker identifier, which defines the jobID-to-jobtracker
+        mapping. In a cluster setup this string is the jobtracker
+        start time; in a local setting it is "local".
+        The second part of the JobID is the job number. <br>
+ An example JobID is :
+ <code>job_200707121733_0003</code> , which represents the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse JobID strings, but rather
+ use appropriate constructors or {@link #forName(String)} method.
+
+ @see TaskID
+ @see TaskAttemptID
+ @see JobTracker#getNewJobId()
+ @see JobTracker#getStartTime()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobID -->
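+ <!-- A small sketch of the recommended JobID handling: parse with forName(String)
+      instead of splitting the string by hand, and build matching regexes with
+      getJobIDsPattern. The id string is hypothetical.
+
+      import org.apache.hadoop.mapred.JobID;
+
+      public class JobIDSketch {
+        public static void main(String[] args) {
+          JobID id = JobID.forName("job_200707121733_0003");
+          System.out.println(id.getJtIdentifier() + " ran " + id);
+
+          // Prints "job_200707121733_[0-9]*", matching any job of that
+          // jobtracker instance.
+          System.out.println(JobID.getJobIDsPattern("200707121733", null));
+        }
+      }
+ -->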
+ <!-- start class org.apache.hadoop.mapred.JobPriority -->
+ <class name="JobPriority" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobPriority&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobPriority[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Used to describe the priority of the running job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobPriority -->
+ <!-- start class org.apache.hadoop.mapred.JobProfile -->
+ <class name="JobProfile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobProfile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an empty {@link JobProfile}.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, org.apache.hadoop.mapred.JobID, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Construct a {@link JobProfile} from the userid, jobid,
+        job config-file, job-details url and job name.
+
+ @param user userid of the person who submitted the job.
+ @param jobid id of the job.
+ @param jobFile job configuration file.
+ @param url link to the web-ui for details of the job.
+ @param name user-specified job name.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, org.apache.hadoop.mapred.JobID, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Construct a {@link JobProfile} from the userid, jobid,
+        job config-file, job-details url and job name.
+
+ @param user userid of the person who submitted the job.
+ @param jobid id of the job.
+ @param jobFile job configuration file.
+ @param url link to the web-ui for details of the job.
+ @param name user-specified job name.
+ @param queueName name of the queue to which the job is submitted]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="use JobProfile(String, JobID, String, String, String) instead">
+ <doc>
+ <![CDATA[@deprecated use JobProfile(String, JobID, String, String, String) instead]]>
+ </doc>
+ </constructor>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user id.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job id.]]>
+ </doc>
+ </method>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID() instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID() instead]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configuration file for the job.]]>
+ </doc>
+ </method>
+ <method name="getURL" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the link to the web-ui for details of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name.]]>
+ </doc>
+ </method>
+ <method name="getQueueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the queue to which the job is submitted.
+ @return name of the queue.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A JobProfile is a MapReduce primitive. Tracks a job,
+ whether living or dead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobProfile -->
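+ <!-- A hypothetical construction of a JobProfile using the five-argument
+      constructor above; the user, paths and URL are invented for illustration.
+
+      import org.apache.hadoop.mapred.JobID;
+      import org.apache.hadoop.mapred.JobProfile;
+
+      public class JobProfileSketch {
+        public static void main(String[] args) {
+          JobID id = new JobID("200707121733", 3);
+          JobProfile profile = new JobProfile("alice", id,
+              "hdfs://nn:8020/jobs/job.xml",       // job configuration file
+              "http://jt:50030/jobdetails.jsp",    // web-ui details link
+              "word count");                       // user-specified job name
+
+          System.out.println(profile.getUser() + " submitted "
+              + profile.getJobID() + " ('" + profile.getJobName() + "')");
+        }
+      }
+ -->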
+ <!-- start class org.apache.hadoop.mapred.JobQueueInfo -->
+ <class name="JobQueueInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobQueueInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor for Job Queue Info.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobQueueInfo" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a new JobQueueInfo object using the queue name and the
+ scheduling information passed.
+
+ @param queueName Name of the job queue
+ @param schedulingInfo Scheduling Information associated with the job
+ queue]]>
+ </doc>
+ </constructor>
+ <method name="setQueueName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the queue name of the JobQueueInfo
+
+ @param queueName Name of the job queue.]]>
+ </doc>
+ </method>
+ <method name="getQueueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the queue name from JobQueueInfo
+
+ @return queue name]]>
+ </doc>
+ </method>
+ <method name="setSchedulingInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="schedulingInfo" type="java.lang.String"/>
+ <doc>
+        <![CDATA[Set the scheduling information associated with a particular job queue.
+
+        @param schedulingInfo Scheduling information of the queue]]>
+ </doc>
+ </method>
+ <method name="getSchedulingInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Gets the scheduling information associated with a particular job queue.
+        If nothing is set, returns <b>"N/A"</b>.
+
+        @return Scheduling information associated with the particular job queue]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Class that contains the information regarding the Job Queues which are
+ maintained by the Hadoop Map/Reduce framework.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobQueueInfo -->
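+ <!-- A brief sketch of JobQueueInfo, using the two-argument constructor above;
+      the queue name and scheduling string are made up.
+
+      import org.apache.hadoop.mapred.JobQueueInfo;
+
+      public class JobQueueInfoSketch {
+        public static void main(String[] args) {
+          JobQueueInfo queue = new JobQueueInfo("research", "capacity: 30%");
+          System.out.println(queue.getQueueName() + " : "
+              + queue.getSchedulingInfo());
+
+          // With nothing set, getSchedulingInfo() reports "N/A".
+          System.out.println(new JobQueueInfo().getSchedulingInfo());
+        }
+      }
+ -->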
+ <!-- start class org.apache.hadoop.mapred.JobShell -->
+ <class name="JobShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run method from Tool]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+        <![CDATA[Provides command line parsing for job submission. A job submission looks like:
+        hadoop jar -libjars <comma separated jars> -archives <comma separated archives>
+        -files <comma separated files> inputjar args]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobShell -->
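+ <!-- Since JobShell implements Tool, a plausible programmatic equivalent of its
+      main() is to run it through ToolRunner, which handles the generic
+      -libjars/-files/-archives options before delegating; this wiring is an
+      assumption based on the Tool contract, not taken from this file.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.mapred.JobShell;
+      import org.apache.hadoop.util.ToolRunner;
+
+      public class JobShellSketch {
+        public static void main(String[] args) throws Exception {
+          int exitCode = ToolRunner.run(new Configuration(), new JobShell(), args);
+          System.exit(exitCode);
+        }
+      }
+ -->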
+ <!-- start class org.apache.hadoop.mapred.JobStatus -->
+ <class name="JobStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="JobStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on cleanup
+ @param runState The current state of the job]]>
+ </doc>
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job]]>
+ </doc>
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, int, org.apache.hadoop.mapred.JobPriority"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+        @param reduceProgress The progress made on the reduces
+        @param cleanupProgress The progress made on the cleanup
+        @param runState The current state of the job
+ @param jp Priority of the job.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, float, int, org.apache.hadoop.mapred.JobPriority"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.]]>
+ </doc>
+ </constructor>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID instead]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The jobid of the Job]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in maps]]>
+ </doc>
+ </method>
+ <method name="cleanupProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in cleanup]]>
+ </doc>
+ </method>
+ <method name="setupProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in setup]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in reduce]]>
+ </doc>
+ </method>
+ <method name="getRunState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return running state of the job]]>
+ </doc>
+ </method>
+ <method name="setRunState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Change the current run state of the job.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return start time of the job]]>
+ </doc>
+ </method>
+ <method name="clone" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUsername" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the username of the job]]>
+ </doc>
+ </method>
+ <method name="getSchedulingInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the Scheduling information associated to a particular Job.
+ @return the scheduling information of the job]]>
+ </doc>
+ </method>
+ <method name="setSchedulingInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="schedulingInfo" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Used to set the scheduling information associated to a particular Job.
+
+ @param schedulingInfo Scheduling information of the job]]>
+ </doc>
+ </method>
+ <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the priority of the job
+ @return job priority]]>
+ </doc>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jp" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Set the priority of the job, defaulting to NORMAL.
+ @param jp new job priority]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SUCCEEDED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PREP" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="KILLED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Describes the current status of a job. This is
+ not intended to be a comprehensive piece of data.
+ For that, look at JobProfile.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobStatus -->
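+ <!-- A minimal sketch of reading a JobStatus, using the four-argument
+      constructor and the run-state constants above; the progress values are
+      invented.
+
+      import org.apache.hadoop.mapred.JobID;
+      import org.apache.hadoop.mapred.JobStatus;
+
+      public class JobStatusSketch {
+        public static void main(String[] args) {
+          JobID id = new JobID("200707121733", 3);
+          JobStatus status = new JobStatus(id, 0.5f, 0.1f, JobStatus.RUNNING);
+
+          if (status.getRunState() == JobStatus.RUNNING) {
+            System.out.println("maps at " + (status.mapProgress() * 100)
+                + "%, reduces at " + (status.reduceProgress() * 100) + "%");
+          }
+        }
+      }
+ -->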
+ <!-- start class org.apache.hadoop.mapred.JobTracker -->
+ <class name="JobTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.InterTrackerProtocol"/>
+ <implements name="org.apache.hadoop.mapred.JobSubmissionProtocol"/>
+ <implements name="org.apache.hadoop.mapred.TaskTrackerManager"/>
+ <method name="startTracker" return="org.apache.hadoop.mapred.JobTracker"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker with given configuration.
+
+ The conf will be modified to reflect the actual ports on which
+ the JobTracker is up and running if the user passes the port as
+ <code>zero</code>.
+
+ @param conf configuration for the JobTracker.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hasRestarted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Whether the JT has restarted]]>
+ </doc>
+ </method>
+ <method name="hasRecovered" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Whether the JT has recovered upon restart]]>
+ </doc>
+ </method>
+ <method name="getRecoveryDuration" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[How long the jobtracker took to recover from restart.]]>
+ </doc>
+ </method>
+ <method name="getInstrumentationClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.JobTrackerInstrumentation&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setInstrumentationClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="t" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.JobTrackerInstrumentation&gt;"/>
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Run forever]]>
+ </doc>
+ </method>
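+ <!-- A sketch of the startup sequence implied by startTracker and offerService:
+      start the tracker with a JobConf (a port of zero makes it pick free ports
+      and write them back into the conf), then serve forever.
+
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.JobTracker;
+
+      public class JobTrackerStartSketch {
+        public static void main(String[] args) throws Exception {
+          JobConf conf = new JobConf();
+          JobTracker tracker = JobTracker.startTracker(conf);
+          tracker.offerService();  // runs forever, per the doc above
+        }
+      }
+ -->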
+ <method name="getTotalSubmissions" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobTrackerMachine" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTrackerIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+        <![CDATA[Get the unique identifier (i.e. timestamp) of this job tracker start.
+ @return a string with a unique identifier]]>
+ </doc>
+ </method>
+ <method name="getTrackerPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="runningJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRunningJobs" return="java.util.List&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version that is called from a timer thread, and therefore needs to be
+ careful to synchronize.]]>
+ </doc>
+ </method>
+ <method name="failedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="completedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="taskTrackers" return="java.util.Collection&lt;org.apache.hadoop.mapred.TaskTrackerStatus&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskTracker" return="org.apache.hadoop.mapred.TaskTrackerStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trackerID" type="java.lang.String"/>
+ </method>
+ <method name="resolveAndAddToTopology" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getNodesAtMaxLevel" return="java.util.Collection&lt;org.apache.hadoop.net.Node&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a collection of nodes at the max level]]>
+ </doc>
+ </method>
+ <method name="getParentNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <param name="level" type="int"/>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the Node in the network topology that corresponds to the hostname]]>
+ </doc>
+ </method>
+ <method name="getNumTaskCacheLevels" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumResolvedTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumberOfUniqueHosts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="addJobInProgressListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="listener" type="org.apache.hadoop.mapred.JobInProgressListener"/>
+ </method>
+ <method name="removeJobInProgressListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="listener" type="org.apache.hadoop.mapred.JobInProgressListener"/>
+ </method>
+ <method name="getQueueManager" return="org.apache.hadoop.mapred.QueueManager"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the {@link QueueManager} associated with the JobTracker.]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="heartbeat" return="org.apache.hadoop.mapred.HeartbeatResponse"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskTrackerStatus"/>
+ <param name="initialContact" type="boolean"/>
+ <param name="acceptNewTasks" type="boolean"/>
+ <param name="responseId" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The periodic heartbeat mechanism between the {@link TaskTracker} and
+ the {@link JobTracker}.
+
+ The {@link JobTracker} processes the status information sent by the
+ {@link TaskTracker} and responds with instructions to start/stop
+ tasks or jobs, and also 'reset' instructions during contingencies.]]>
+ </doc>
+ </method>
+ <method name="getNextHeartbeatInterval" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+          <![CDATA[Calculates the next heartbeat interval using the cluster size.
+ The heartbeat interval is incremented by 1 second for every 50 nodes.
+ @return next heartbeat interval.]]>
+ </doc>
+ </method>
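+      <!-- Illustrative sketch (not part of the generated API description): the
+           rule above, expressed as a small helper. The 3-second floor and the
+           per-50-node step are assumptions taken from the description, not
+           verified constants.
+
+             // Next heartbeat interval in milliseconds for a given cluster size.
+             static int nextHeartbeatIntervalMillis(int clusterSize) {
+               int steps = (clusterSize + 49) / 50;   // ceil(clusterSize / 50.0)
+               return Math.max(steps * 1000, 3000);   // assumed 3 s minimum
+             }
+             // e.g. 120 trackers: ceil(120/50) = 3, so the interval is 3000 ms
+      -->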
+ <method name="getFilesystemName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab the local fs name]]>
+ </doc>
+ </method>
+ <method name="reportTaskTrackerError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTracker" type="java.lang.String"/>
+ <param name="errorClass" type="java.lang.String"/>
+ <param name="errorMessage" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNewJobId" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Allocates a new JobId string.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[JobTracker.submitJob() kicks off a new job.
+
+ Create a 'JobInProgress' object, which contains both JobProfile
+ and JobStatus. Those two sub-objects are sometimes shipped outside
+ of the JobTracker. But JobInProgress adds info that's useful for
+ the JobTracker alone.]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="priority" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the priority of a job
+ @param jobid id of the job
+ @param priority new priority of the job]]>
+ </doc>
+ </method>
+ <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getCleanupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getSetupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxEvents" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the diagnostics for a given task
+ @param taskId the id of the task
+ @return an array of the diagnostic messages]]>
+ </doc>
+ </method>
+ <method name="getTip" return="org.apache.hadoop.mapred.TaskInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tipid" type="org.apache.hadoop.mapred.TaskID"/>
+ <doc>
+          <![CDATA[Returns the specified TaskInProgress, or null.]]>
+ </doc>
+ </method>
+ <method name="killTask" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a Task to be killed]]>
+ </doc>
+ </method>
+ <method name="getAssignedTracker" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Get tracker name for a given task id.
+ @param taskId the name of the task
+ @return The name of the task tracker]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSystemDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir()]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.JobInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+          <![CDATA[Get the localized job file path on the job tracker's local file system
+ @param jobId id of the job
+ @return the path of the job conf file on the local file system]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker process. This is used only for debugging. As a rule,
+ JobTracker should be run as part of the DFS Namenode process.]]>
+ </doc>
+ </method>
+ <method name="getQueues" return="org.apache.hadoop.mapred.JobQueueInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getQueueInfo" return="org.apache.hadoop.mapred.JobQueueInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queue" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getJobsFromQueue" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queue" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[JobTracker is the central location for submitting and
+ tracking MR jobs in a network environment.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker -->
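+      <!-- Illustrative sketch (not part of the generated API description):
+           clients do not invoke JobTracker.submitJob() directly; jobs normally
+           reach it through JobClient. A minimal submission, where MyJob and the
+           "in"/"out" paths are placeholders:
+
+             import org.apache.hadoop.fs.Path;
+             import org.apache.hadoop.mapred.FileInputFormat;
+             import org.apache.hadoop.mapred.FileOutputFormat;
+             import org.apache.hadoop.mapred.JobClient;
+             import org.apache.hadoop.mapred.JobConf;
+
+             JobConf conf = new JobConf(MyJob.class);   // MyJob is hypothetical
+             conf.setJobName("example");
+             FileInputFormat.setInputPaths(conf, new Path("in"));
+             FileOutputFormat.setOutputPath(conf, new Path("out"));
+             JobClient.runJob(conf);                    // blocks until completion
+      -->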
+ <!-- start class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <class name="JobTracker.IllegalStateException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobTracker.IllegalStateException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A client tried to submit a job before the Job Tracker was ready.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <!-- start class org.apache.hadoop.mapred.JobTracker.State -->
+ <class name="JobTracker.State" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobTracker.State&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobTracker.State[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.State -->
+ <!-- start class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+ <class name="KeyValueLineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="findSeparator" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="sep" type="byte"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+          <![CDATA[Read a key/value pair from a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class treats a line in the input as a key/value pair separated by a
+   separator character. The separator can be specified in the config file
+ under the attribute name key.value.separator.in.input.line. The default
+ separator is the tab character ('\t').]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
+ <class name="KeyValueTextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="KeyValueTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+   Either linefeed or carriage-return is used to signal the end of a line. Each line
+   is divided into key and value parts by a separator byte. If no such byte
+   exists, the key will be the entire line and the value will be empty.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
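+      <!-- Illustrative sketch (not part of the generated API description):
+           selecting this input format and overriding the default tab separator
+           via the attribute documented above. The input path is a placeholder.
+
+             import org.apache.hadoop.fs.Path;
+             import org.apache.hadoop.mapred.FileInputFormat;
+             import org.apache.hadoop.mapred.JobConf;
+             import org.apache.hadoop.mapred.KeyValueTextInputFormat;
+
+             JobConf conf = new JobConf();
+             conf.setInputFormat(KeyValueTextInputFormat.class);
+             // split each line on the first ':' instead of the default '\t'
+             conf.set("key.value.separator.in.input.line", ":");
+             FileInputFormat.setInputPaths(conf, new Path("in"));
+      -->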
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader -->
+ <class name="LineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="LineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.LongWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.LongWritable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the progress within the split]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+          <![CDATA[Treats keys as offsets in the file and values as lines.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader -->
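+      <!-- Illustrative sketch (not part of the generated API description):
+           reading a plain stream through the (InputStream, long, long, int)
+           constructor listed above. The file name and buffer size are
+           placeholders.
+
+             import java.io.FileInputStream;
+             import java.io.InputStream;
+             import org.apache.hadoop.io.LongWritable;
+             import org.apache.hadoop.io.Text;
+             import org.apache.hadoop.mapred.LineRecordReader;
+
+             InputStream in = new FileInputStream("data.txt");
+             LineRecordReader reader =
+                 new LineRecordReader(in, 0L, Long.MAX_VALUE, 4096);
+             LongWritable key = reader.createKey();   // byte offset of the line
+             Text value = reader.createValue();       // the line itself
+             while (reader.next(key, value)) {
+               System.out.println(key.get() + "\t" + value);
+             }
+             reader.close();
+      -->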
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
+ <class name="LineRecordReader.LineReader" extends="org.apache.hadoop.util.LineReader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.util.LineReader} instead.">
+ <constructor name="LineRecordReader.LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <doc>
+ <![CDATA[A class that provides a line reader from an input stream.
+ @deprecated Use {@link org.apache.hadoop.util.LineReader} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
+ <!-- start class org.apache.hadoop.mapred.MapFileOutputFormat -->
+ <class name="MapFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.MapFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getEntry" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readers" type="org.apache.hadoop.io.MapFile.Reader[]"/>
+ <param name="partitioner" type="org.apache.hadoop.mapred.Partitioner&lt;K, V&gt;"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get an entry from output generated by this class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link MapFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapFileOutputFormat -->
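+      <!-- Illustrative sketch (not part of the generated API description):
+           looking up one key in the MapFiles a job produced, via getReaders()
+           and getEntry(). Using HashPartitioner and the "out" path here is an
+           assumption; the lookup must use the same partitioner as the job.
+
+             import org.apache.hadoop.fs.FileSystem;
+             import org.apache.hadoop.fs.Path;
+             import org.apache.hadoop.io.MapFile;
+             import org.apache.hadoop.io.Text;
+             import org.apache.hadoop.mapred.JobConf;
+             import org.apache.hadoop.mapred.MapFileOutputFormat;
+             import org.apache.hadoop.mapred.lib.HashPartitioner;
+
+             JobConf conf = new JobConf();
+             FileSystem fs = FileSystem.get(conf);
+             MapFile.Reader[] readers =
+                 MapFileOutputFormat.getReaders(fs, new Path("out"), conf);
+             Text key = new Text("some key");          // placeholder key
+             Text value = new Text();
+             // routes the key to the right reader, then performs the lookup
+             MapFileOutputFormat.getEntry(readers,
+                 new HashPartitioner<Text, Text>(), key, value);
+      -->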
+ <!-- start interface org.apache.hadoop.mapred.Mapper -->
+ <interface name="Mapper" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1"/>
+ <param name="value" type="V1"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Maps a single input key/value pair into an intermediate key/value pair.
+
+ <p>Output pairs need not be of the same types as input pairs. A given
+ input pair may map to zero or many output pairs. Output pairs are
+ collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes an insignificant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has
+   timed out and kill that task. The other way of avoiding this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the input key.
+ @param value the input value.
+ @param output collects mapped keys and values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Maps input key/value pairs to a set of intermediate key/value pairs.
+
+   <p>Maps are the individual tasks which transform input records into
+   intermediate records. The transformed intermediate records need not be of
+ the same type as the input records. A given input pair may map to zero or
+ many output pairs.</p>
+
+ <p>The Hadoop Map-Reduce framework spawns one map task for each
+ {@link InputSplit} generated by the {@link InputFormat} for the job.
+ <code>Mapper</code> implementations can access the {@link JobConf} for the
+ job via the {@link JobConfigurable#configure(JobConf)} and initialize
+ themselves. Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p>The framework then calls
+ {@link #map(Object, Object, OutputCollector, Reporter)}
+ for each key/value pair in the <code>InputSplit</code> for that task.</p>
+
+ <p>All intermediate values associated with a given output key are
+ subsequently grouped by the framework, and passed to a {@link Reducer} to
+ determine the final output. Users can control the grouping by specifying
+ a <code>Comparator</code> via
+ {@link JobConf#setOutputKeyComparatorClass(Class)}.</p>
+
+ <p>The grouped <code>Mapper</code> outputs are partitioned per
+ <code>Reducer</code>. Users can control which keys (and hence records) go to
+ which <code>Reducer</code> by implementing a custom {@link Partitioner}.
+
+ <p>Users can optionally specify a <code>combiner</code>, via
+ {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the
+ intermediate outputs, which helps to cut down the amount of data transferred
+ from the <code>Mapper</code> to the <code>Reducer</code>.
+
+ <p>The intermediate, grouped outputs are always stored in
+ {@link SequenceFile}s. Applications can specify if and how the intermediate
+ outputs are to be compressed and which {@link CompressionCodec}s are to be
+ used via the <code>JobConf</code>.</p>
+
+ <p>If the job has
+ <a href="{@docRoot}/org/apache/hadoop/mapred/JobConf.html#ReducerNone">zero
+ reduces</a> then the output of the <code>Mapper</code> is directly written
+ to the {@link FileSystem} without grouping by keys.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyMapper&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Mapper&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String mapTaskId;
+ private String inputFile;
+ private int noRecords = 0;
+
+ public void configure(JobConf job) {
+ mapTaskId = job.get("mapred.task.id");
+ inputFile = job.get("mapred.input.file");
+ }
+
+ public void map(K key, V val,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ // reporter.progress();
+
+ // Process some more
+ // ...
+ // ...
+
+ // Increment the no. of &lt;key, value&gt; pairs processed
+ ++noRecords;
+
+ // Increment counters
+ reporter.incrCounter(NUM_RECORDS, 1);
+
+ // Every 100 records update application-level status
+ if ((noRecords%100) == 0) {
+ reporter.setStatus(mapTaskId + " processed " + noRecords +
+ " from input-file: " + inputFile);
+ }
+
+ // Output the result
+ output.collect(key, val);
+ }
+ }
+ </pre></blockquote></p>
+
+ <p>Applications may write a custom {@link MapRunnable} to exert greater
+ control on map processing e.g. multi-threaded <code>Mapper</code>s etc.</p>
+
+ @see JobConf
+ @see InputFormat
+ @see Partitioner
+ @see Reducer
+ @see MapReduceBase
+ @see MapRunnable
+ @see SequenceFile]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Mapper -->
+ <!-- start class org.apache.hadoop.mapred.MapReduceBase -->
+ <class name="MapReduceBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="MapReduceBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for {@link Mapper} and {@link Reducer} implementations.
+
+ <p>Provides default no-op implementations for a few methods, most non-trivial
+ applications need to override some of them.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapReduceBase -->
+ <!-- start interface org.apache.hadoop.mapred.MapRunnable -->
+ <interface name="MapRunnable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start mapping input <tt>&lt;key, value&gt;</tt> pairs.
+
+ <p>Mapping of input records to output records is complete when this method
+ returns.</p>
+
+ @param input the {@link RecordReader} to read the input records.
+   @param output the {@link OutputCollector} to collect the output records.
+ @param reporter {@link Reporter} to report progress, status-updates etc.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Expert: Generic interface for {@link Mapper}s.
+
+ <p>Custom implementations of <code>MapRunnable</code> can exert greater
+ control on map processing e.g. multi-threaded, asynchronous mappers etc.</p>
+
+ @see Mapper]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.MapRunnable -->
+ <!-- start class org.apache.hadoop.mapred.MapRunner -->
+ <class name="MapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Default {@link MapRunnable} implementation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapRunner -->
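+      <!-- Illustrative sketch (not part of the generated API description): the
+           essence of the default MapRunner loop, written as a custom
+           MapRunnable. The unchecked cast reflects what such an implementation
+           would need; class and variable names are assumptions.
+
+             import java.io.IOException;
+             import org.apache.hadoop.mapred.*;
+             import org.apache.hadoop.util.ReflectionUtils;
+
+             public class SimpleRunner<K1, V1, K2, V2>
+                 implements MapRunnable<K1, V1, K2, V2> {
+               private Mapper<K1, V1, K2, V2> mapper;
+
+               @SuppressWarnings("unchecked")
+               public void configure(JobConf job) {
+                 mapper = (Mapper<K1, V1, K2, V2>)
+                     ReflectionUtils.newInstance(job.getMapperClass(), job);
+               }
+
+               public void run(RecordReader<K1, V1> input,
+                               OutputCollector<K2, V2> output,
+                               Reporter reporter) throws IOException {
+                 K1 key = input.createKey();
+                 V1 value = input.createValue();
+                 try {
+                   while (input.next(key, value)) {   // objects are reused
+                     mapper.map(key, value, output, reporter);
+                   }
+                 } finally {
+                   mapper.close();
+                 }
+               }
+             }
+      -->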
+ <!-- start class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <class name="MultiFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultiFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+          <![CDATA[An abstract {@link InputFormat} that returns {@link MultiFileSplit}'s
+   from its {@link #getSplits(JobConf, int)} method. Splits are constructed from
+   the files under the input paths. The returned splits are of <i>nearly</i>
+   equal content length. <br>
+ Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)}
+ to construct <code>RecordReader</code>'s for <code>MultiFileSplit</code>'s.
+ @see MultiFileSplit]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileInputFormat -->
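+      <!-- Illustrative sketch (not part of the generated API description): the
+           single method a subclass must supply. The anonymous reader below is a
+           placeholder that emits no records; a real one would iterate over the
+           paths in the MultiFileSplit.
+
+             import java.io.IOException;
+             import org.apache.hadoop.io.Text;
+             import org.apache.hadoop.mapred.*;
+
+             public class EmptyMultiFileInputFormat
+                 extends MultiFileInputFormat<Text, Text> {
+               public RecordReader<Text, Text> getRecordReader(
+                   InputSplit split, JobConf job, Reporter reporter)
+                   throws IOException {
+                 return new RecordReader<Text, Text>() {
+                   public boolean next(Text key, Text value) { return false; }
+                   public Text createKey() { return new Text(); }
+                   public Text createValue() { return new Text(); }
+                   public long getPos() { return 0L; }
+                   public float getProgress() { return 1.0f; }
+                   public void close() { }
+                 };
+               }
+             }
+      -->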
+ <!-- start class org.apache.hadoop.mapred.MultiFileSplit -->
+ <class name="MultiFileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="MultiFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLengths" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array containing the lengths of the files in
+ the split]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the length of the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getNumPaths" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all the Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+          <![CDATA[A sub-collection of input files. Unlike {@link FileSplit}, a MultiFileSplit
+   does not represent a split of a single file, but a split of the input files
+   into smaller sets. The atomic unit of splitting is a file. <br>
+   MultiFileSplit can be used to implement {@link RecordReader}'s that read,
+   for example, one record per file.
+ @see FileSplit
+ @see MultiFileInputFormat]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileSplit -->
+ <!-- start interface org.apache.hadoop.mapred.OutputCollector -->
+ <interface name="OutputCollector" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="collect"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Adds a key/value pair to the output.
+
+ @param key the key to collect.
+   @param value the value to collect.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Collects the <code>&lt;key, value&gt;</code> pairs output by {@link Mapper}s
+ and {@link Reducer}s.
+
+ <p><code>OutputCollector</code> is the generalization of the facility
+ provided by the Map-Reduce framework to collect data output by either the
+ <code>Mapper</code> or the <code>Reducer</code> i.e. intermediate outputs
+ or the output of the job.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputCollector -->
+ <!-- start class org.apache.hadoop.mapred.OutputCommitter -->
+ <class name="OutputCommitter" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputCommitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setupJob"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+          <![CDATA[For the framework to set up the job output during initialization.
+
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException if temporary output could not be created]]>
+ </doc>
+ </method>
+ <method name="cleanupJob"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For cleaning up the job's output after job completion
+
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setupTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets up output for the task.
+
+ @param taskContext Context of the task whose output is being written.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="needsTaskCommit" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+          <![CDATA[Check whether the task needs a commit.
+
+ @param taskContext
+ @return true/false
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="commitTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+          <![CDATA[To promote the task's temporary output to the final output location.
+
+   The task's output is moved to the job's output directory.
+
+   @param taskContext Context of the task whose output is being written.
+   @throws IOException if the commit is not successful]]>
+ </doc>
+ </method>
+ <method name="abortTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+          <![CDATA[Discard the task output.
+
+ @param taskContext
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputCommitter</code> describes the commit of task output for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputCommitter</code> of
+ the job to:<p>
+ <ol>
+   <li>
+   Set up the job during initialization. For example, create the temporary
+   output directory for the job.
+   </li>
+   <li>
+   Clean up the job after completion. For example, remove the temporary
+   output directory.
+   </li>
+   <li>
+   Set up the task's temporary output.
+   </li>
+   <li>
+   Check whether a task needs a commit, to avoid the commit procedure
+   for tasks that do not need one.
+   </li>
+   <li>
+   Commit the task output.
+   </li>
+   <li>
+   Discard the task output on abort.
+   </li>
+ </ol>
+
+ @see FileOutputCommitter
+ @see JobContext
+ @see TaskAttemptContext]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputCommitter -->
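+      <!-- Illustrative sketch (not part of the generated API description): a
+           committer that opts out of the whole commit protocol, covering every
+           abstract method listed above. FileOutputCommitter is the real
+           default; this no-op variant is an assumption for illustration.
+
+             import org.apache.hadoop.mapred.JobContext;
+             import org.apache.hadoop.mapred.OutputCommitter;
+             import org.apache.hadoop.mapred.TaskAttemptContext;
+
+             public class NullOutputCommitter extends OutputCommitter {
+               public void setupJob(JobContext context) { }     // nothing to create
+               public void cleanupJob(JobContext context) { }   // nothing to remove
+               public void setupTask(TaskAttemptContext context) { }
+               public boolean needsTaskCommit(TaskAttemptContext context) {
+                 return false;                                  // skip commitTask()
+               }
+               public void commitTask(TaskAttemptContext context) { }
+               public void abortTask(TaskAttemptContext context) { }
+             }
+      -->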
+ <!-- start interface org.apache.hadoop.mapred.OutputFormat -->
+ <interface name="OutputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordWriter} for the given job.
+
+ @param ignored
+ @param job configuration for the job whose output is being written.
+ @param name the unique name for this part of the output.
+ @param progress mechanism for reporting progress while writing to file.
+ @return a {@link RecordWriter} to write the output for the job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check for validity of the output-specification for the job.
+
+   <p>This is to validate the output specification for the job when the job
+   is submitted. Typically this checks that the output directory does not
+   already exist, throwing an exception when it does, so that output is not
+   overwritten.</p>
+
+ @param ignored
+ @param job job configuration.
+ @throws IOException when output should not be attempted]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputFormat</code> describes the output-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
+ job to:<p>
+ <ol>
+ <li>
+   Validate the output-specification of the job. For example, check that the
+   output directory doesn't already exist.
+   </li>
+ <li>
+ Provide the {@link RecordWriter} implementation to be used to write out
+ the output files of the job. Output files are stored in a
+ {@link FileSystem}.
+ </li>
+ </ol>
+
+ @see RecordWriter
+ @see JobConf]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.OutputLogFilter -->
+ <class name="OutputLogFilter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.PathFilter"/>
+ <constructor name="OutputLogFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <doc>
+          <![CDATA[This class filters the log files from the given directory:
+   it doesn't accept paths containing _logs.
+   It can be used to list the paths of an output directory as follows:
+   Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
+                                         new OutputLogFilter()));]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputLogFilter -->
+ <!-- start interface org.apache.hadoop.mapred.Partitioner -->
+ <interface name="Partitioner" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numPartitions" type="int"/>
+ <doc>
+          <![CDATA[Get the partition number for a given key (hence record) given the total
+   number of partitions i.e. the number of reduce-tasks for the job.
+
+   <p>Typically a hash function on all or a subset of the key.</p>
+
+   @param key the key to be partitioned.
+ @param value the entry value.
+ @param numPartitions the total number of partitions.
+ @return the partition number for the <code>key</code>.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partitions the key space.
+
+ <p><code>Partitioner</code> controls the partitioning of the keys of the
+ intermediate map-outputs. The key (or a subset of the key) is used to derive
+ the partition, typically by a hash function. The total number of partitions
+ is the same as the number of reduce tasks for the job. Hence this controls
+ which of the <code>m</code> reduce tasks the intermediate key (and hence the
+ record) is sent for reduction.</p>
+
+ @see Reducer]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Partitioner -->
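+      <!-- Illustrative sketch (not part of the generated API description): a
+           hash partitioner over a subset of the key, here the first
+           whitespace-delimited word, so records sharing that word reach the
+           same reducer. The Text/Text types and the splitting rule are
+           assumptions.
+
+             import org.apache.hadoop.io.Text;
+             import org.apache.hadoop.mapred.JobConf;
+             import org.apache.hadoop.mapred.Partitioner;
+
+             public class FirstWordPartitioner implements Partitioner<Text, Text> {
+               public void configure(JobConf job) { }   // nothing to configure
+               public int getPartition(Text key, Text value, int numPartitions) {
+                 String first = key.toString().split("\\s+")[0];
+                 // mask the sign bit so the bucket index is non-negative
+                 return (first.hashCode() & Integer.MAX_VALUE) % numPartitions;
+               }
+             }
+      -->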
+ <!-- start interface org.apache.hadoop.mapred.RecordReader -->
+ <interface name="RecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the next key/value pair from the input for processing.
+
+ @param key the key to read data into
+ @param value the value to read data into
+ @return true iff a key/value was read, false if at EOF]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a key.
+
+ @return a new key object.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a value.
+
+ @return a new value object.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current position in the input.
+
+ @return the current position in the input.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this {@link InputSplit} to future operations.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+          <![CDATA[How much of the input has the {@link RecordReader} consumed, i.e.
+   how much of the input has been processed?
+
+ @return progress from <code>0.0</code> to <code>1.0</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordReader</code> reads &lt;key, value&gt; pairs from an
+ {@link InputSplit}.
+
+      <p><code>RecordReader</code> typically converts the byte-oriented view of
+      the input, provided by the <code>InputSplit</code>, into a record-oriented
+      view for the {@link Mapper} and {@link Reducer} tasks to process. It thus
+      assumes the responsibility of finding record boundaries and presenting the
+      tasks with keys and values.</p>
+
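+      <p>A typical consumption loop, sketched here for an arbitrary
+      <code>RecordReader</code> instance <code>reader</code> (the variable and the
+      <code>process</code> hook are illustrative), re-uses the objects handed out
+      by <code>createKey()</code> and <code>createValue()</code>:</p>
+      <p><blockquote><pre>
+      K key = reader.createKey();
+      V value = reader.createValue();
+      while (reader.next(key, value)) {
+        // key and value are re-filled in place on every call
+        process(key, value);   // hypothetical application hook
+      }
+      reader.close();
+      </pre></blockquote></p>
+      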
+ @see InputSplit
+ @see InputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordReader -->
+ <!-- start interface org.apache.hadoop.mapred.RecordWriter -->
+ <interface name="RecordWriter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes a key/value pair.
+
+ @param key the key to write.
+ @param value the value to write.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this <code>RecordWriter</code> to future operations.
+
+ @param reporter facility to report progress.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordWriter</code> writes the output &lt;key, value&gt; pairs
+ to an output file.
+
+      <p><code>RecordWriter</code> implementations write the job outputs to the
+      {@link FileSystem}.</p>
+
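+      <p>As a sketch, the life-cycle of a <code>RecordWriter</code> obtained from
+      an {@link OutputFormat} (the variable names are illustrative):</p>
+      <p><blockquote><pre>
+      RecordWriter&lt;K, V&gt; writer =
+        outputFormat.getRecordWriter(fs, job, name, progress);
+      writer.write(key, value);     // once per output pair
+      writer.close(reporter);       // release resources when done
+      </pre></blockquote></p>
+      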
+ @see OutputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordWriter -->
+ <!-- start interface org.apache.hadoop.mapred.Reducer -->
+ <interface name="Reducer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="values" type="java.util.Iterator&lt;V2&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K3, V3&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<i>Reduces</i> values for a given key.
+
+ <p>The framework calls this method for each
+ <code>&lt;key, (list of values)></code> pair in the grouped inputs.
+ Output values must be of the same type as input values. Input keys must
+ not be altered. The framework will <b>reuse</b> the key and value objects
+ that are passed into the reduce, therefore the application should clone
+ the objects they want to keep a copy of. In many cases, all values are
+ combined into zero or one value.
+ </p>
+
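+      <p>A sketch of one common way to keep copies across iterations, assuming
+      the values are {@link Writable}s and the {@link JobConf} was saved in
+      <code>configure()</code> as <code>savedConf</code> (a hypothetical field):</p>
+      <p><blockquote><pre>
+      List&lt;V2&gt; kept = new ArrayList&lt;V2&gt;();
+      while (values.hasNext()) {
+        // The framework re-uses the value object, so clone before keeping it
+        kept.add(WritableUtils.clone(values.next(), savedConf));
+      }
+      </pre></blockquote></p>
+      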
+ <p>Output pairs are collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+      or just indicate that they are alive. In scenarios where the application
+      takes a significant amount of time to process individual key/value
+      pairs, this is crucial since the framework might assume that the task has
+      timed out and kill that task. The other way of avoiding this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the key.
+ @param values the list of values to reduce.
+ @param output to collect keys and combined values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reduces a set of intermediate values which share a key to a smaller set of
+ values.
+
+ <p>The number of <code>Reducer</code>s for the job is set by the user via
+ {@link JobConf#setNumReduceTasks(int)}. <code>Reducer</code> implementations
+ can access the {@link JobConf} for the job via the
+ {@link JobConfigurable#configure(JobConf)} method and initialize themselves.
+ Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p><code>Reducer</code> has 3 primary phases:</p>
+ <ol>
+ <li>
+
+ <h4 id="Shuffle">Shuffle</h4>
+
+      <p>The <code>Reducer</code> receives as input the grouped output of the
+      {@link Mapper}s. In this phase the framework fetches, for each
+      <code>Reducer</code>, the relevant partition of the output of all the
+      <code>Mapper</code>s via HTTP.
+      </p>
+ </li>
+
+ <li>
+ <h4 id="Sort">Sort</h4>
+
+ <p>The framework groups <code>Reducer</code> inputs by <code>key</code>s
+ (since different <code>Mapper</code>s may have output the same key) in this
+ stage.</p>
+
+ <p>The shuffle and sort phases occur simultaneously i.e. while outputs are
+ being fetched they are merged.</p>
+
+ <h5 id="SecondarySort">SecondarySort</h5>
+
+      <p>If the equivalence rules for grouping the intermediate keys are
+      different from those for grouping keys before reduction, then one may
+      specify a <code>Comparator</code> via
+      {@link JobConf#setOutputValueGroupingComparator(Class)}. Since
+      {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to
+      control how intermediate keys are sorted, these can be used in conjunction
+      to simulate a <i>secondary sort on values</i>.</p>
+
+
+ For example, say that you want to find duplicate web pages and tag them
+ all with the url of the "best" known example. You would set up the job
+ like:
+ <ul>
+ <li>Map Input Key: url</li>
+ <li>Map Input Value: document</li>
+ <li>Map Output Key: document checksum, url pagerank</li>
+ <li>Map Output Value: url</li>
+ <li>Partitioner: by checksum</li>
+ <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
+ <li>OutputValueGroupingComparator: by checksum</li>
+ </ul>
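+      
+      <p>In <code>JobConf</code> terms that setup might be sketched as follows
+      (the job, partitioner and comparator classes are hypothetical):</p>
+      <p><blockquote><pre>
+      JobConf job = new JobConf(DedupJob.class);
+      job.setPartitionerClass(ChecksumPartitioner.class);      // partition by checksum
+      job.setOutputKeyComparatorClass(
+          ChecksumPagerankComparator.class);                   // checksum, then decreasing pagerank
+      job.setOutputValueGroupingComparator(
+          ChecksumComparator.class);                           // group by checksum only
+      </pre></blockquote></p>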
+ </li>
+
+ <li>
+ <h4 id="Reduce">Reduce</h4>
+
+ <p>In this phase the
+ {@link #reduce(Object, Iterator, OutputCollector, Reporter)}
+ method is called for each <code>&lt;key, (list of values)></code> pair in
+ the grouped inputs.</p>
+ <p>The output of the reduce task is typically written to the
+ {@link FileSystem} via
+ {@link OutputCollector#collect(Object, Object)}.</p>
+ </li>
+ </ol>
+
+ <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyReducer&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Reducer&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String reduceTaskId;
+ private int noKeys = 0;
+
+ public void configure(JobConf job) {
+ reduceTaskId = job.get("mapred.task.id");
+ }
+
+ public void reduce(K key, Iterator&lt;V&gt; values,
+ OutputCollector&lt;K, V&gt; output,
+ Reporter reporter)
+ throws IOException {
+
+ // Process
+ int noValues = 0;
+ while (values.hasNext()) {
+ V value = values.next();
+
+ // Increment the no. of values for this key
+ ++noValues;
+
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ if ((noValues%10) == 0) {
+ reporter.progress();
+ }
+
+ // Process some more
+ // ...
+ // ...
+
+ // Output the &lt;key, value&gt;
+ output.collect(key, value);
+ }
+
+ // Increment the no. of &lt;key, list of values&gt; pairs processed
+ ++noKeys;
+
+ // Increment counters
+        reporter.incrCounter(MyCounters.NUM_RECORDS, 1);
+
+ // Every 100 keys update application-level status
+ if ((noKeys%100) == 0) {
+ reporter.setStatus(reduceTaskId + " processed " + noKeys);
+ }
+ }
+ }
+ </pre></blockquote></p>
+
+ @see Mapper
+ @see Partitioner
+ @see Reporter
+ @see MapReduceBase]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reducer -->
+ <!-- start interface org.apache.hadoop.mapred.Reporter -->
+ <interface name="Reporter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Progressable"/>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the status description for the task.
+
+ @param status brief description of the current status.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the {@link Counter} of the given group with the given name.
+
+ @param group counter group
+ @param name counter name
+ @return the <code>Counter</code> of the given group/name.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the key, which can be of
+ any {@link Enum} type, by the specified amount.
+
+      @param key key to identify the counter to be incremented. The key can
+      be any <code>Enum</code>.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the group and counter name
+ by the specified amount.
+
+ @param group name to identify the group of the counter to be incremented.
+ @param counter name to identify the counter within the group.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="getInputSplit" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+ <doc>
+ <![CDATA[Get the {@link InputSplit} object for a map.
+
+ @return the <code>InputSplit</code> that the map is reading from.
+ @throws UnsupportedOperationException if called outside a mapper]]>
+ </doc>
+ </method>
+ <field name="NULL" type="org.apache.hadoop.mapred.Reporter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A constant of Reporter type that does nothing.]]>
+ </doc>
+ </field>
+ <doc>
+      <![CDATA[A facility for Map-Reduce applications to report progress and update
+      counters, status information, etc.
+      
+      <p>{@link Mapper} and {@link Reducer} can use the <code>Reporter</code>
+      provided to report progress or just indicate that they are alive. In
+      scenarios where the application takes a significant amount of time to
+      process individual key/value pairs, this is crucial since the framework
+      might assume that the task has timed out and kill that task.</p>
+      
+      <p>Applications can also update {@link Counters} via the provided
+      <code>Reporter</code>.</p>
+
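+      <p>A sketch of typical use from inside a map method (the counter group and
+      counter name are illustrative):</p>
+      <p><blockquote><pre>
+      public void map(K1 key, V1 value,
+                      OutputCollector&lt;K2, V2&gt; output,
+                      Reporter reporter) throws IOException {
+        // ... potentially slow per-record work ...
+        reporter.incrCounter("MyApp", "RECORDS", 1);
+        reporter.progress();   // tell the framework this task is still alive
+      }
+      </pre></blockquote></p>
+      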
+ @see Progressable
+ @see Counters]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reporter -->
+ <!-- start interface org.apache.hadoop.mapred.RunningJob -->
+ <interface name="RunningJob" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job identifier.
+
+ @return the job identifier.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.">
+ <doc>
+ <![CDATA[@deprecated This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the job.
+
+ @return the name of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the path of the submitted job configuration.
+
+ @return the path of the submitted job configuration.]]>
+ </doc>
+ </method>
+ <method name="getTrackingURL" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the URL where some job progress information will be displayed.
+
+ @return the URL where some job progress information will be displayed.]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's map-tasks, as a float between 0.0
+ and 1.0. When all map tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's map-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0
+ and 1.0. When all reduce tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's reduce-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cleanupProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's cleanup-tasks, as a float between 0.0
+ and 1.0. When all cleanup tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's cleanup-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setupProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's setup-tasks, as a float between 0.0
+ and 1.0. When all setup tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's setup-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isComplete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job is finished or not.
+ This is a non-blocking call.
+
+ @return <code>true</code> if the job is complete, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isSuccessful" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job completed successfully.
+
+ @return <code>true</code> if the job succeeded, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="waitForCompletion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Blocks until the job is complete.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJobState" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Returns the current state of the job; see {@link JobStatus}.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill the running job. Blocks until all job tasks have been
+ killed as well. If the job is no longer running, it simply returns.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="priority" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the priority of a running job.
+ @param priority the new priority for the job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="startFrom" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get events indicating completion (success/failure) of component tasks.
+
+ @param startFrom index to start fetching events from
+ @return an array of {@link TaskCompletionEvent}s
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill indicated task attempt.
+
+ @param taskId the id of the task to be terminated.
+      @param shouldFail if true the task is failed and added to the failed tasks
+      list, otherwise it is just killed, without affecting
+      the job's failure status.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #killTask(TaskAttemptID, boolean)}">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the counters for this job.
+
+ @return the counters for this job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[<code>RunningJob</code> is the user interface to query for details of a
+      running Map-Reduce job.
+      
+      <p>Clients can get hold of <code>RunningJob</code> via the {@link JobClient}
+      and then query the running job for details such as name, configuration,
+      progress, etc.</p>
+
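+      <p>A polling sketch (error handling elided; <code>job</code> is a
+      configured {@link JobConf}):</p>
+      <p><blockquote><pre>
+      JobClient client = new JobClient(job);
+      RunningJob running = client.submitJob(job);
+      while (!running.isComplete()) {    // non-blocking check
+        Thread.sleep(5000);              // InterruptedException handling elided
+      }
+      if (!running.isSuccessful()) {
+        System.err.println("Job " + running.getID() + " failed");
+      }
+      </pre></blockquote></p>
+      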
+ @see JobClient]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RunningJob -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <class name="SequenceFileAsBinaryInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[An InputFormat that reads keys and values from SequenceFiles in
+      binary (raw) format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <class name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"/>
+ <constructor name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the key class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the value class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.BytesWritable"/>
+ <param name="val" type="org.apache.hadoop.io.BytesWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read raw bytes from a SequenceFile.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Read records from a SequenceFile as binary (raw) bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
+ <class name="SequenceFileAsBinaryOutputFormat" extends="org.apache.hadoop.mapred.SequenceFileOutputFormat&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setSequenceFileOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+      <![CDATA[Set the key class for the {@link SequenceFile}.
+      <p>This allows the user to specify the key class to be different
+      from the actual class ({@link BytesWritable}) used for writing.</p>
+
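+      <p>For example (a sketch; <code>conf</code> is the job's {@link JobConf}):</p>
+      <p><blockquote><pre>
+      SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(conf, Text.class);
+      SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(conf, IntWritable.class);
+      </pre></blockquote></p>
+      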
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output key class.]]>
+ </doc>
+ </method>
+ <method name="setSequenceFileOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+      <![CDATA[Set the value class for the {@link SequenceFile}.
+      <p>This allows the user to specify the value class to be different
+      from the actual class ({@link BytesWritable}) used for writing.</p>
+      
+      @param conf the {@link JobConf} to modify
+      @param theClass the SequenceFile output value class.]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputKeyClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the key class for the {@link SequenceFile}
+
+ @return the key class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputValueClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the value class for the {@link SequenceFile}
+
+ @return the value class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[An {@link OutputFormat} that writes keys and values to
+      {@link SequenceFile}s in binary (raw) format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <class name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" type="org.apache.hadoop.io.BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.BytesWritable"/>
+ </method>
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Inner class used for appendRaw]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+ <class name="SequenceFileAsTextInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[This class is similar to SequenceFileInputFormat, except that it generates
+      SequenceFileAsTextRecordReader, which converts the input keys and values to their
+      String forms by calling the toString() method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <class name="SequenceFileAsTextRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="SequenceFileAsTextRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read the next key/value pair, converting each to its String form.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[This class converts the input keys and values to their String forms by calling
+      the toString() method. This class is to SequenceFileAsTextInputFormat what
+      LineRecordReader is to TextInputFormat.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+ <class name="SequenceFileInputFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Create a record reader for the given split.
+      @param split file split
+      @param job job configuration
+      @param reporter reporter that sends reports to the task tracker
+      @return RecordReader]]>
+ </doc>
+ </method>
+ <method name="setFilterClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="filterClass" type="java.lang.Class"/>
+ <doc>
+      <![CDATA[Set the filter class.
+
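+      <p>For example, to keep roughly one record in ten (a sketch;
+      <code>job</code> is the {@link JobConf}):</p>
+      <p><blockquote><pre>
+      job.setInputFormat(SequenceFileInputFilter.class);
+      SequenceFileInputFilter.setFilterClass(job,
+          SequenceFileInputFilter.PercentFilter.class);
+      SequenceFileInputFilter.PercentFilter.setFrequency(job, 10);
+      </pre></blockquote></p>
+      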
+ @param conf application configuration
+ @param filterClass filter class]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[A class that allows a map/reduce job to work on a sample of sequence files.
+      The sample is determined by the filter class set on the job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+ <!-- start interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <interface name="SequenceFileInputFilter.Filter" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Filter function: decide whether a record should be accepted.
+      @param key record key
+      @return true if the record is accepted; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Filter interface.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <class name="SequenceFileInputFilter.FilterBase" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.SequenceFileInputFilter.Filter"/>
+ <constructor name="SequenceFileInputFilter.FilterBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+      <![CDATA[Base class for filters.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
+ <class name="SequenceFileInputFilter.MD5Filter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.MD5Filter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+      <![CDATA[Set the filtering frequency in the configuration.
+
+ @param conf configuration
+ @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Configure the filter according to the configuration.
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Filtering method: if MD5(key) % frequency == 0, return true;
+      otherwise return false.
+      @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[This class returns a set of records by examining the MD5 digest of its
+      key against a filtering frequency <i>f</i>. The filtering criterion is
+      MD5(key) % f == 0.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <class name="SequenceFileInputFilter.PercentFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.PercentFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+      <![CDATA[Set the frequency and store it in the configuration.
+      @param conf configuration
+      @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Configure the filter by checking the configuration.
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Filtering method: if record# % frequency == 0, return true;
+      otherwise return false.
+      @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[This class returns a percentage of records.
+      The percentage is determined by a filtering frequency <i>f</i> using
+      the criterion record# % f == 0.
+      For example, if the frequency is 10, one out of every 10 records is returned.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
+ <class name="SequenceFileInputFilter.RegexFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.RegexFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPattern"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="regex" type="java.lang.String"/>
+ <exception name="PatternSyntaxException" type="java.util.regex.PatternSyntaxException"/>
+ <doc>
+      <![CDATA[Define the filtering regex and store it in the configuration.
+ @param conf where the regex is set
+ @param regex regex used as a filter]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Configure the filter by checking the configuration.]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Filtering method: if the key matches the regex, return true;
+      otherwise return false.
+      @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Filters records by matching the key against a regex.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
+ <class name="SequenceFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.SequenceFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf}
+ @return the {@link CompressionType} for the output {@link SequenceFile},
+ defaulting to {@link CompressionType#RECORD}]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf} to modify
+ @param style the {@link CompressionType} for the output
+ {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+ <class name="SequenceFileRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <constructor name="SequenceFileRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The class of key that must be passed to {@link
+      #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The class of value that must be passed to {@link
+      #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="conf" type="org.apache.hadoop.conf.Configuration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[A {@link RecordReader} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SkipBadRecords -->
+ <class name="SkipBadRecords" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SkipBadRecords"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAttemptsToStartSkipping" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Get the number of task attempts AFTER which skip mode
+      will be kicked off. When skip mode is kicked off, the
+      task reports the range of records which it will process
+      next to the TaskTracker, so that on failure the TaskTracker knows
+      which records are possibly bad. On subsequent executions,
+      those records are skipped.
+      Default value is 2.
+      
+      @param conf the configuration
+      @return the number of task attempts after which skipping starts]]>
+ </doc>
+ </method>
+ <method name="setAttemptsToStartSkipping"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attemptsToStartSkipping" type="int"/>
+ <doc>
+      <![CDATA[Set the number of task attempts AFTER which skip mode
+      will be kicked off. When skip mode is kicked off, the
+      task reports the range of records which it will process
+      next to the TaskTracker, so that on failure the TaskTracker knows
+      which records are possibly bad. On subsequent executions,
+      those records are skipped.
+      Default value is 2.
+
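+      <p>For example (a sketch; <code>conf</code> is the job configuration):</p>
+      <p><blockquote><pre>
+      SkipBadRecords.setAttemptsToStartSkipping(conf, 2);
+      </pre></blockquote></p>
+      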
+ @param conf the configuration
+      @param attemptsToStartSkipping the number of task attempts]]>
+ </doc>
+ </method>
+ <method name="getAutoIncrMapperProcCount" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Get the flag which, if set to true, causes
+      {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} to be incremented
+      by MapRunner after invoking the map function. This value must be set to
+      false for applications that process records asynchronously
+      or buffer the input records, for example streaming applications.
+      In such cases applications should increment this counter on their own.
+      Default value is true.
+
+ @param conf the configuration
+ @return <code>true</code> if auto increment
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}.
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setAutoIncrMapperProcCount"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="autoIncr" type="boolean"/>
+ <doc>
+      <![CDATA[Set the flag which, if set to true, causes
+      {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} to be incremented
+      by MapRunner after invoking the map function. This value must be set to
+      false for applications that process records asynchronously
+      or buffer the input records, for example streaming applications.
+      In such cases applications should increment this counter on their own.
+      Default value is true.
+
+ @param conf the configuration
+ @param autoIncr whether to auto increment
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}.]]>
+ </doc>
+ </method>
+ <method name="getAutoIncrReducerProcCount" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the flag which, if set to true, causes
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} to be incremented
+ by the framework after invoking the reduce function. This value must be
+ set to false for applications which process records asynchronously
+ or buffer the input records (for example, streaming);
+ such applications should increment this counter on their own.
+ Default value is true.
+
+ @param conf the configuration
+ @return <code>true</code> if auto increment
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}.
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setAutoIncrReducerProcCount"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="autoIncr" type="boolean"/>
+ <doc>
+ <![CDATA[Set the flag which, if set to true, causes
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} to be incremented
+ by the framework after invoking the reduce function. This value must be
+ set to false for applications which process records asynchronously
+ or buffer the input records (for example, streaming);
+ such applications should increment this counter on their own.
+ Default value is true.
+
+ @param conf the configuration
+ @param autoIncr whether to auto increment
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}.]]>
+ </doc>
+ </method>
+ <method name="getSkipOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the directory to which skipped records are written. By default it is
+ a subdirectory of the output's _logs directory.
+ Users can stop the writing of skipped records by setting the value to null.
+
+ @param conf the configuration.
+ @return the skip output directory. Null is returned if this is not set
+ and the output directory is also not set.]]>
+ </doc>
+ </method>
+ <method name="setSkipOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the directory to which skipped records are written. By default it is
+ a subdirectory of the output's _logs directory.
+ Users can stop the writing of skipped records by setting the value to null.
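+
+ For example (assuming a JobConf named <code>conf</code>; the path is
+ purely illustrative):
+ <pre>
+ SkipBadRecords.setSkipOutputPath(conf, new Path("skip-records"));
+ // or stop writing skipped records altogether:
+ SkipBadRecords.setSkipOutputPath(conf, null);
+ </pre>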
+
+ @param conf the configuration.
+ @param path skip output directory path]]>
+ </doc>
+ </method>
+ <method name="getMapperMaxSkipRecords" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the number of acceptable skip records surrounding the bad record, PER
+ bad record, in the mapper. The number includes the bad record as well.
+ To turn detection/skipping of bad records off, set the value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts for this task are exhausted.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not
+ try to narrow down; whatever records get skipped (application-dependent)
+ are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @return the maximum number of acceptable skip records]]>
+ </doc>
+ </method>
+ <method name="setMapperMaxSkipRecords"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="maxSkipRecs" type="long"/>
+ <doc>
+ <![CDATA[Set the number of acceptable skip records surrounding the bad record, PER
+ bad record, in the mapper. The number includes the bad record as well.
+ To turn detection/skipping of bad records off, set the value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts for this task are exhausted.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not
+ try to narrow down; whatever records get skipped (application-dependent)
+ are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @param maxSkipRecs the maximum number of acceptable skip records]]>
+ </doc>
+ </method>
+ <method name="getReducerMaxSkipGroups" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the number of acceptable skip groups surrounding the bad group, PER
+ bad group, in the reducer. The number includes the bad group as well.
+ To turn detection/skipping of bad groups off, set the value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts for this task are exhausted.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not
+ try to narrow down; whatever groups get skipped (application-dependent)
+ are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @return the maximum number of acceptable skip groups]]>
+ </doc>
+ </method>
+ <method name="setReducerMaxSkipGroups"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="maxSkipGrps" type="long"/>
+ <doc>
+ <![CDATA[Set the number of acceptable skip groups surrounding the bad group, PER
+ bad group, in the reducer. The number includes the bad group as well.
+ To turn detection/skipping of bad groups off, set the value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts for this task are exhausted.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not
+ try to narrow down; whatever groups get skipped (application-dependent)
+ are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @param maxSkipGrps the maximum number of acceptable skip groups]]>
+ </doc>
+ </method>
+ <field name="COUNTER_GROUP" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the counter group for the special counters which are
+ written by the application and used by the framework for detecting
+ bad records. For detection to work, these counters must be incremented
+ by the application.]]>
+ </doc>
+ </field>
+ <field name="COUNTER_MAP_PROCESSED_RECORDS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of processed map records.
+ @see SkipBadRecords#getAutoIncrMapperProcCount(Configuration)]]>
+ </doc>
+ </field>
+ <field name="COUNTER_REDUCE_PROCESSED_GROUPS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of processed reduce groups.
+ @see SkipBadRecords#getAutoIncrReducerProcCount(Configuration)]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Utility class for the skip-bad-records functionality. It contains the
+ various settings related to the skipping of bad records.
+
+ <p>Hadoop provides an optional mode of execution in which bad records
+ are detected and skipped in further attempts.</p>
+
+ <p>This feature can be used when map/reduce tasks crash deterministically on
+ certain input, due to bugs in the map/reduce function. The usual
+ course would be to fix these bugs. But sometimes that is not possible;
+ perhaps the bug is in third-party libraries for which the source code is
+ not available. In that case, the task never reaches completion even with
+ multiple attempts, and the complete data for that task is lost.</p>
+
+ <p>With this feature, only a small portion of data surrounding the
+ bad record is lost, which may be acceptable for some applications;
+ see {@link SkipBadRecords#setMapperMaxSkipRecords(Configuration, long)}.</p>
+
+ <p>Skipping mode is kicked off after a certain number of failures;
+ see {@link SkipBadRecords#setAttemptsToStartSkipping(Configuration, int)}.</p>
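+
+ <p>A minimal configuration sketch (the numbers are purely illustrative,
+ not recommended values):</p>
+ <pre>
+ JobConf conf = new JobConf();
+ // begin skipping after two failed attempts
+ SkipBadRecords.setAttemptsToStartSkipping(conf, 2);
+ // accept losing up to 10 records around each bad map record
+ SkipBadRecords.setMapperMaxSkipRecords(conf, 10);
+ // accept losing up to 10 groups around each bad reduce group
+ SkipBadRecords.setReducerMaxSkipGroups(conf, 10);
+ </pre>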
+
+ <p>In skipping mode, the map/reduce task maintains, at all times, the record
+ range which is currently being processed. Before giving the input to the
+ map/reduce function, it sends this record range to the TaskTracker.
+ If the task crashes, the TaskTracker knows which range was last reported,
+ and on further attempts that range gets skipped.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SkipBadRecords -->
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer -->
+ <class name="StatusHttpServer" extends="org.apache.hadoop.http.HttpServer"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A mapred http server.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer -->
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <class name="StatusHttpServer.TaskGraphServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer.TaskGraphServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="width" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[width of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="height" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[height of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="ymargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on y axis]]>
+ </doc>
+ </field>
+ <field name="xmargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on x axis]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The servlet that outputs SVG graphics for map/reduce task
+ statuses.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskAttemptContext -->
+ <class name="TaskAttemptContext" extends="org.apache.hadoop.mapred.JobContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTaskAttemptID" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the taskAttemptID.
+
+ @return TaskAttemptID]]>
+ </doc>
+ </method>
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job Configuration.
+
+ @return JobConf]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskAttemptContext -->
+ <!-- start class org.apache.hadoop.mapred.TaskAttemptID -->
+ <class name="TaskAttemptID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskAttemptID" type="org.apache.hadoop.mapred.TaskID, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from given {@link TaskID}.
+ @param taskId TaskID that this task belongs to
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskAttemptID" type="java.lang.String, int, boolean, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from the given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param taskId taskId number
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link TaskID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskAttemptID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare TaskAttemptIDs first by their tipIds, then by task numbers.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a TaskAttemptID object from the given string
+ @return the constructed TaskAttemptID object, or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <param name="attemptId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task attempt IDs. Arguments can
+ be given as null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>all task attempt IDs</i>
+ of <i>any jobtracker</i>, in <i>any job</i>, of the <i>first
+ map task</i>, we would use:
+ <pre>
+ TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+ </pre>
+ which will return:
+ <pre> "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @param attemptId the task attempt number, or null
+ @return a regex pattern matching TaskAttemptIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[TaskAttemptID represents the immutable and unique identifier for
+ a task attempt. Each task attempt is one particular instance of a Map or
+ Reduce Task identified by its TaskID.
+
+ A TaskAttemptID consists of two parts: the first part is the
+ {@link TaskID} that this TaskAttemptID belongs to;
+ the second part is the task attempt number. <br>
+ An example TaskAttemptID is
+ <code>attempt_200707121733_0003_m_000005_0</code>, which represents the
+ zeroth task attempt of the fifth map task in the third job
+ running at the jobtracker started at <code>200707121733</code>.
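+
+ For illustration, the example ID above can be parsed back with
+ {@link #forName(String)}:
+ <pre>
+ TaskAttemptID id =
+     TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
+ JobID jobId = id.getJobID(); // job_200707121733_0003
+ boolean isMap = id.isMap();  // true
+ </pre>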
+ <p>
+ Applications should never construct or parse TaskAttemptID strings;
+ they should rather use the appropriate constructors or the
+ {@link #forName(String)} method.
+
+ @see JobID
+ @see TaskID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskAttemptID -->
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent -->
+ <class name="TaskCompletionEvent" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskCompletionEvent"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor for Writable.]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskCompletionEvent" type="int, org.apache.hadoop.mapred.TaskAttemptID, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor. eventId should be created externally and incremented
+ per event for each job.
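+
+ A minimal construction sketch; the two undocumented middle arguments
+ are assumed here to be the task's id within the job and its map flag:
+ <pre>
+ TaskCompletionEvent event = new TaskCompletionEvent(
+     0,                                    // eventId, starts from 0
+     TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0"),
+     5,                                    // id within the job (assumed)
+     true,                                 // is a map task (assumed)
+     TaskCompletionEvent.Status.SUCCEEDED, // completion status
+     "http://tracker:50060");              // task tracker http location
+ </pre>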
+ @param eventId event id; event ids should be unique and assigned
+ incrementally, starting from 0.
+ @param taskId task id
+ @param status task's status
+ @param taskTrackerHttp task tracker's host:port for http.]]>
+ </doc>
+ </constructor>
+ <method name="getEventId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns event Id.
+ @return event id]]>
+ </doc>
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskAttemptId()} instead.">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id
+ @deprecated use {@link #getTaskAttemptId()} instead.]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptId" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id]]>
+ </doc>
+ </method>
+ <method name="getTaskStatus" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the task completion status, e.g. Status.SUCCEEDED or
+ Status.FAILED.
+ @return the task completion status]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerHttp" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the HTTP location of the TaskTracker where this task ran.
+ @return the HTTP location of the TaskTracker's user logs]]>
+ </doc>
+ </method>
+ <method name="getTaskRunTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the time (in milliseconds) the task took to complete.]]>
+ </doc>
+ </method>
+ <method name="setTaskRunTime"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskCompletionTime" type="int"/>
+ <doc>
+ <![CDATA[Set the task completion time.
+ @param taskCompletionTime time (in milliseconds) the task took to complete]]>
+ </doc>
+ </method>
+ <method name="setEventId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="eventId" type="int"/>
+ <doc>
+ <![CDATA[Set the event id. Event ids should be assigned incrementally,
+ starting from 0.
+ @param eventId the event id]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setTaskID(TaskAttemptID)} instead.">
+ <param name="taskId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId
+ @deprecated use {@link #setTaskID(TaskAttemptID)} instead.]]>
+ </doc>
+ </method>
+ <method name="setTaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId]]>
+ </doc>
+ </method>
+ <method name="setTaskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"/>
+ <doc>
+ <![CDATA[Set task status.
+ @param status]]>
+ </doc>
+ </method>
+ <method name="setTaskTrackerHttp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTrackerHttp" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set task tracker http location.
+ @param taskTrackerHttp]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isMapTask" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="idWithinJob" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="EMPTY_ARRAY" type="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is used to track task completion events on the
+ JobTracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent -->
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <class name="TaskCompletionEvent.Status" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskCompletionEvent.Status&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <!-- start class org.apache.hadoop.mapred.TaskID -->
+ <class name="TaskID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskID" type="org.apache.hadoop.mapred.JobID, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from given {@link JobID}.
+ @param jobId JobID that this tip belongs to
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskID" type="java.lang.String, int, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from the given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this tip belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare TaskIDs first by their jobIds, then by tip numbers. Reduces are
+ defined as greater than maps.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a TaskID object from the given string
+ @return the constructed TaskID object, or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getTaskIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task IDs. Arguments can
+ be given as null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>the first map task</i>
+ of <i>any jobtracker</i>, of <i>any job</i>, we would use:
+ <pre>
+ TaskID.getTaskIDsPattern(null, null, true, 1);
+ </pre>
+ which will return:
+ <pre> "task_[^_]*_[0-9]*_m_000001" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @return a regex pattern matching TaskIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[TaskID represents the immutable and unique identifier for
+ a Map or Reduce Task. Each TaskID encompasses multiple attempts made to
+ execute the Map or Reduce Task, each of which is uniquely identified by
+ its TaskAttemptID.
+
+ A TaskID consists of three parts: the first part is the {@link JobID} that
+ this TaskInProgress belongs to; the second part is either 'm' or 'r',
+ representing whether the task is a map task or a reduce task;
+ and the third part is the task number. <br>
+ An example TaskID is
+ <code>task_200707121733_0003_m_000005</code>, which represents the
+ fifth map task in the third job running at the jobtracker
+ started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskID strings;
+ they should rather use the appropriate constructors or the
+ {@link #forName(String)} method.
+
+ @see JobID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskID -->
+ <!-- start class org.apache.hadoop.mapred.TaskLog -->
+ <class name="TaskLog" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLog"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskLogFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="getRealTaskLogFileLocation" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="getIndexFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ </method>
+ <method name="syncLogs"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="firstTaskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logsRetainHours" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Purge old user logs.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskLogLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the desired maximum length of task's logs.
+ @param conf the job to look in
+ @return the number of bytes to cap the log files at]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ If the tailLength is 0, the entire output will be saved.
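+
+ A minimal usage sketch (the command and file names are purely
+ illustrative; java.util.Arrays, java.util.List and java.io.File are
+ assumed to be imported):
+ <pre>
+ List&lt;String&gt; cmd = Arrays.asList("/bin/myTask", "arg1");
+ List&lt;String&gt; wrapped = TaskLog.captureOutAndError(
+     cmd, new File("stdout.log"), new File("stderr.log"), 0L);
+ // wrapped is the shell invocation that should actually be executed
+ </pre>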
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="setup" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ Setup commands, such as setting a memory limit, can be passed;
+ they will be executed before the exec.
+ If the tailLength is 0, the entire output will be saved.
+ @param setup The setup commands for the exec'ed process.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="setup" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <param name="pidFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ Setup commands, such as setting a memory limit, can be passed;
+ they will be executed before the exec.
+ If the tailLength is 0, the entire output will be saved.
+ @param setup The setup commands for the exec'ed process.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @param pidFileName The name of the pid-file
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="addCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="isExecutable" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add quotes to each of the command strings and
+ return them as a single string.
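+
+ A small illustrative sketch (java.util.Arrays assumed imported):
+ <pre>
+ // quote each argument; true resolves the first one as an executable
+ String quoted = TaskLog.addCommand(
+     Arrays.asList("/bin/echo", "hello world"), true);
+ </pre>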
+ @param cmd The command to be quoted
+ @param isExecutable if true, the first argument is treated as an
+ executable and converted to a shell path
+ @return the quoted string.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="captureDebugOut" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="debugoutFilename" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture the debug script's
+ stdout and stderr to the debug output file.
+ @param cmd The command and the arguments that should be run
+ @param debugoutFilename The filename that stdout and stderr
+ should be saved to.
+ @return the modified command that should be run
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple logger to handle the task-specific user logs.
+ This class uses the system property <code>hadoop.log.dir</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog -->
+ <!-- start class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <class name="TaskLog.LogName" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskLog.LogName&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskLog.LogName[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskLog.LogName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The filter for userlogs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <!-- start class org.apache.hadoop.mapred.TaskLogAppender -->
+ <class name="TaskLogAppender" extends="org.apache.log4j.FileAppender"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogAppender"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="activateOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Getter/Setter methods for log4j.]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ </method>
+ <method name="getTotalLogFileSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setTotalLogFileSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logSize" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[A simple log4j-appender for the task child's
+ map-reduce system logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogAppender -->
+ <!-- start class org.apache.hadoop.mapred.TaskLogServlet -->
+ <class name="TaskLogServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskLogUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTrackerHostName" type="java.lang.String"/>
+ <param name="httpPort" type="java.lang.String"/>
+ <param name="taskAttemptID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Construct the taskLogUrl
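+
+ For example (the host name is illustrative; 50060 is the usual
+ TaskTracker HTTP port):
+ <pre>
+ String url = TaskLogServlet.getTaskLogUrl("tracker01", "50060",
+     "attempt_200707121733_0003_m_000005_0");
+ </pre>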
+ @param taskTrackerHostName the TaskTracker host name
+ @param httpPort the TaskTracker HTTP port
+ @param taskAttemptID the task attempt to get logs for
+ @return the taskLogUrl]]>
+ </doc>
+ </method>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the logs via http.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A servlet that is run by the TaskTrackers to provide the task logs via http.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskReport -->
+ <class name="TaskReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskID()} instead">
+ <doc>
+ <![CDATA[@deprecated use {@link #getTaskID()} instead]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The id of the task.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The amount completed, between zero and one.]]>
+ </doc>
+ </method>
+ <method name="getState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The most recent state, reported by a {@link Reporter}.]]>
+ </doc>
+ </method>
+ <method name="getDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A list of error messages.]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A table of counters.]]>
+ </doc>
+ </method>
+ <method name="getFinishTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the finish time of the task.
+ @return 0 if the finish time was not set, else the finish time.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start time of the task.
+ @return 0 if the start time was not set, else the start time.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A report on the state of a task.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskReport -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker -->
+ <class name="TaskTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.TaskUmbilicalProtocol"/>
+ <implements name="java.lang.Runnable"/>
+ <constructor name="TaskTracker" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start with the local machine name, and the default JobTracker]]>
+ </doc>
+ </constructor>
+ <method name="getTaskTrackerInstrumentation" return="org.apache.hadoop.mapred.TaskTrackerInstrumentation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getInstrumentationClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.TaskTrackerInstrumentation&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setInstrumentationClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="t" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.TaskTrackerInstrumentation&gt;"/>
+ </method>
+ <method name="cleanupStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Removes all contents of temporary storage. Called upon
+ startup to remove any leftovers from previous runs.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close down the TaskTracker and all its components. We must also shut down
+ any running tasks or threads, and clean up disk space. A new TaskTracker
+ within the same process space might be restarted, so everything must be
+ clean.]]>
+ </doc>
+ </method>
+ <method name="getJobClient" return="org.apache.hadoop.mapred.InterTrackerProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The connection to the JobTracker, used by the TaskRunner
+ for locating remote files.]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerReportAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the address to which the TaskTracker has bound.]]>
+ </doc>
+ </method>
+ <method name="getJvmManagerInstance" return="org.apache.hadoop.mapred.JvmManager"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The server retry loop.
+ This while-loop attempts to connect to the JobTracker. It only
+ loops when the old TaskTracker has gone bad (its state is
+ stale somehow) and we need to reinitialize everything.]]>
+ </doc>
+ </method>
+ <method name="getTask" return="org.apache.hadoop.mapred.JvmTask"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jvmId" type="org.apache.hadoop.mapred.JVMId"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called upon startup by the child process, to fetch Task data.]]>
+ </doc>
+ </method>
+ <method name="statusUpdate" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskStatus" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called periodically to report Task progress, from 0.0 to 1.0.]]>
+ </doc>
+ </method>
+ <method name="reportDiagnosticInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="info" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when the task dies before completion, and we want to report back
+ diagnostic info.]]>
+ </doc>
+ </method>
+ <method name="reportNextRecordRange"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="range" type="org.apache.hadoop.mapred.SortedRanges.Range"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ping" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Child checking to see if we're alive. Normally does nothing.]]>
+ </doc>
+ </method>
+ <method name="commitPending"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskStatus" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The task is reporting that it is in commit_pending
+ and is waiting for the commit response.]]>
+ </doc>
+ </method>
+ <method name="canCommit" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Child checking whether it can commit]]>
+ </doc>
+ </method>
+ <method name="done"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The task is done.]]>
+ </doc>
+ </method>
+ <method name="shuffleError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A reduce-task failed to shuffle the map-outputs. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="fsError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A child task had a local filesystem error. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="getMapCompletionEvents" return="org.apache.hadoop.mapred.MapTaskCompletionEventsUpdate"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxLocs" type="int"/>
+ <param name="id" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mapOutputLost"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="errorMsg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A completed map task's output has been lost.]]>
+ </doc>
+ </method>
+ <method name="isIdle" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this task tracker idle?
+ @return has this task tracker finished and cleaned up all of its tasks?]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Start the TaskTracker, pointing it at the indicated JobTracker.]]>
+ </doc>
+ </method>
+ <method name="isTaskMemoryManagerEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the TaskMemoryManager enabled on this system?
+ @return true if enabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getTaskMemoryManager" return="org.apache.hadoop.mapred.TaskMemoryManagerThread"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MR_CLIENTTRACE_FORMAT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ClientTraceLog" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[TaskTracker is a process that starts and tracks MR Tasks
+ in a networked environment. It contacts the JobTracker
+ for Task assignments and to report results.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker -->
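+ <!-- A hedged sketch of the child-side calling order for the umbilical
+      methods declared above. The tracker, jvmId, taskid and taskStatus
+      arguments are assumed to exist and the mapred types are assumed to
+      be visible to the caller; a real child task reaches these methods
+      over RPC rather than on a local TaskTracker instance.
+ 
+      import java.io.IOException;
+      import org.apache.hadoop.mapred.*;
+ 
+      class UmbilicalSketch {
+        static void report(TaskTracker tracker, JVMId jvmId,
+            TaskAttemptID taskid, TaskStatus taskStatus) throws IOException {
+          JvmTask work = tracker.getTask(jvmId);     // fetch Task data at startup
+          tracker.statusUpdate(taskid, taskStatus);  // periodic progress, 0.0 to 1.0
+          tracker.commitPending(taskid, taskStatus); // announce commit_pending
+          if (tracker.canCommit(taskid)) {
+            tracker.done(taskid);                    // report successful completion
+          }
+        }
+      }
+ -->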
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <class name="TaskTracker.MapOutputServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskTracker.MapOutputServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in TaskTracker's Jetty to serve the map outputs
+ to other nodes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <!-- start class org.apache.hadoop.mapred.TextInputFormat -->
+ <class name="TextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="TextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return is used to signal end of line. Keys are
+ the position in the file, and values are the line of text.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextInputFormat -->
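+ <!-- A minimal configuration sketch for TextInputFormat, assuming the
+      old (org.apache.hadoop.mapred) API documented in this file; the
+      input path "in" is hypothetical.
+ 
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.*;
+ 
+      class TextInputSketch {
+        public static void main(String[] args) {
+          JobConf conf = new JobConf();
+          conf.setInputFormat(TextInputFormat.class);           // keys: LongWritable offsets
+          FileInputFormat.setInputPaths(conf, new Path("in"));  // values: Text lines
+        }
+      }
+ -->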
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat -->
+ <class name="TextOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes plain text files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat -->
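+ <!-- The matching output-side sketch: TextOutputFormat writes each
+      key/value pair as one line of text. The output path "out" is
+      hypothetical; wiring assumes the org.apache.hadoop.mapred API.
+ 
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.*;
+ 
+      class TextOutputSketch {
+        public static void main(String[] args) {
+          JobConf conf = new JobConf();
+          conf.setOutputFormat(TextOutputFormat.class);
+          FileOutputFormat.setOutputPath(conf, new Path("out"));
+        }
+      }
+ -->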
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+ <class name="TextOutputFormat.LineRecordWriter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"/>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="out" type="java.io.DataOutputStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+</package>
+<package name="org.apache.hadoop.mapred.jobcontrol">
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.Job -->
+ <class name="Job" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf, java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+ @param jobConf a mapred job configuration representing a job to be executed.
+ @param dependingJobs a list of jobs the current job depends on]]>
+ </doc>
+ </constructor>
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+
+ @param jobConf mapred job configuration representing a job to be executed.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job name of this job]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job name for this job.
+ @param jobName the job name]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job ID of this job assigned by JobControl]]>
+ </doc>
+ </method>
+ <method name="setJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job ID for this job.
+ @param id the job ID]]>
+ </doc>
+ </method>
+ <method name="getMapredJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getAssignedJobID()} instead">
+ <doc>
+ <![CDATA[@return the mapred ID of this job
+ @deprecated use {@link #getAssignedJobID()} instead]]>
+ </doc>
+ </method>
+ <method name="setMapredJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setAssignedJobID(JobID)} instead">
+ <param name="mapredJobID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job.
+ @param mapredJobID the mapred job ID for this job.
+ @deprecated use {@link #setAssignedJobID(JobID)} instead]]>
+ </doc>
+ </method>
+ <method name="getAssignedJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred ID of this job as assigned by the
+ mapred framework.]]>
+ </doc>
+ </method>
+ <method name="setAssignedJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mapredJobID" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job as assigned by the
+ mapred framework.
+ @param mapredJobID the mapred job ID for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred job conf of this job]]>
+ </doc>
+ </method>
+ <method name="setJobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Set the mapred job conf for this job.
+ @param jobConf the mapred job conf for this job.]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the state of this job]]>
+ </doc>
+ </method>
+ <method name="setState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Set the state for this job.
+ @param state the new state for this job.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the message of this job]]>
+ </doc>
+ </method>
+ <method name="setMessage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="message" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the message for this job.
+ @param message the message for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobClient" return="org.apache.hadoop.mapred.JobClient"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job client of this job]]>
+ </doc>
+ </method>
+ <method name="getDependingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the depending jobs of this job]]>
+ </doc>
+ </method>
+ <method name="addDependingJob" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dependingJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a job to this job's dependency list. Depending jobs can only be added while a Job
+ is waiting to run, not during or afterwards.
+
+ @param dependingJob Job that this Job depends on.
+ @return <tt>true</tt> if the Job was added.]]>
+ </doc>
+ </method>
+ <method name="isCompleted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in a complete state]]>
+ </doc>
+ </method>
+ <method name="isReady" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in READY state]]>
+ </doc>
+ </method>
+ <method name="submit"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Submit this job to mapred. The state becomes RUNNING if submission
+ is successful, FAILED otherwise.]]>
+ </doc>
+ </method>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WAITING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEPENDENT_FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class encapsulates a MapReduce job and its dependencies. It monitors
+ the states of the depending jobs and updates the state of this job.
+ A job starts in the WAITING state. If it does not have any depending jobs, or
+ all of the depending jobs are in the SUCCESS state, then the job state will become
+ READY. If any depending job fails, the job will fail too.
+ When in the READY state, the job can be submitted to Hadoop for execution, with
+ the state changing to RUNNING. From the RUNNING state, the job can reach the
+ SUCCESS or FAILED state, depending on the status of the job execution.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.Job -->
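+ <!-- A small sketch of the state machine described above: construct two
+      jobs and make one depend on the other. The JobConf arguments are
+      assumed to be fully configured elsewhere.
+ 
+      import java.io.IOException;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.jobcontrol.Job;
+ 
+      class JobDependencySketch {
+        static Job chain(JobConf first, JobConf second) throws IOException {
+          Job a = new Job(first);   // both jobs start in the WAITING state
+          Job b = new Job(second);
+          b.addDependingJob(a);     // b becomes READY only after a reaches SUCCESS
+          return b;
+        }
+      }
+ -->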
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.JobControl -->
+ <class name="JobControl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobControl" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a job control for a group of jobs.
+ @param groupName a name identifying this group]]>
+ </doc>
+ </constructor>
+ <method name="getWaitingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the waiting state]]>
+ </doc>
+ </method>
+ <method name="getRunningJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the running state]]>
+ </doc>
+ </method>
+ <method name="getReadyJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the ready state]]>
+ </doc>
+ </method>
+ <method name="getSuccessfulJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the success state]]>
+ </doc>
+ </method>
+ <method name="getFailedJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="addJob" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="aJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a new job.
+ @param aJob the new job]]>
+ </doc>
+ </method>
+ <method name="addJobs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobs" type="java.util.Collection&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"/>
+ <doc>
+ <![CDATA[Add a collection of jobs.
+ 
+ @param jobs the jobs to add]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the thread state]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set the thread state to STOPPING so that the
+ thread will stop when it wakes up.]]>
+ </doc>
+ </method>
+ <method name="suspend"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Suspend the running thread.]]>
+ </doc>
+ </method>
+ <method name="resume"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resume the suspended thread.]]>
+ </doc>
+ </method>
+ <method name="allFinished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The main loop for the thread.
+ The loop does the following:
+ Check the states of the running jobs
+ Update the states of waiting jobs
+ Submit the jobs in ready state]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a set of MapReduce jobs and their dependencies. It tracks
+ the states of the jobs by placing them into different tables according to their
+ states.
+
+ This class provides APIs for the client app to add a job to the group and to get
+ the jobs in the group in different states. When a
+ job is added, an ID unique to the group is assigned to the job.
+
+ This class has a thread that submits jobs when they become ready, monitors the
+ states of the running jobs, and updates the states of jobs based on the state changes
+ of the jobs they depend on. The class provides APIs for suspending/resuming
+ the thread, and for stopping the thread.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.JobControl -->
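+ <!-- A hedged driver sketch using only the methods listed above: add
+      jobs to a group, run the JobControl on its own thread, poll until
+      everything finishes, then stop the thread. The group name is
+      hypothetical.
+ 
+      import org.apache.hadoop.mapred.jobcontrol.Job;
+      import org.apache.hadoop.mapred.jobcontrol.JobControl;
+ 
+      class JobControlSketch {
+        static void runGroup(Job a, Job b) {
+          JobControl control = new JobControl("group");
+          control.addJob(a);
+          control.addJob(b);
+          new Thread(control).start();       // JobControl implements Runnable
+          while (!control.allFinished()) {
+            try { Thread.sleep(500); } catch (InterruptedException e) { break; }
+          }
+          control.stop();                    // ask the polling thread to exit
+        }
+      }
+ -->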
+</package>
+<package name="org.apache.hadoop.mapred.join">
+ <!-- start class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+ <class name="ArrayListBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="ArrayListBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayListBackedIterator" type="java.util.ArrayList&lt;X&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. The
+ implementation uses an {@link java.util.ArrayList} to store elements
+ added to it, replaying them as requested.
+ Prefer {@link StreamBackedIterator}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
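+ <!-- A minimal sketch of the ResetableIterator contract using this
+      implementation; the reset-before-reading call reflects an assumed
+      usage pattern, not a documented requirement.
+ 
+      import java.io.IOException;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.mapred.join.ArrayListBackedIterator;
+ 
+      class IteratorSketch {
+        static void demo() throws IOException {
+          ArrayListBackedIterator<Text> it = new ArrayListBackedIterator<Text>();
+          it.add(new Text("a"));
+          it.add(new Text("b"));
+          it.reset();            // rewind to the start of the stored elements
+          Text val = new Text();
+          while (it.hasNext()) {
+            it.next(val);        // copies the next stored element into val
+          }
+          it.close();
+        }
+      }
+ -->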
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <interface name="ComposableInputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Refinement of InputFormat requiring implementors to provide
+ ComposableRecordReader instead of RecordReader.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <interface name="ComposableRecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <implements name="java.lang.Comparable&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key this RecordReader would supply on a call to next(K,V)]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RecordReader into the object provided.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the stream is not empty, but provides no guarantee that
+ a call to next(K,V) will succeed.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[While key-value pairs from this RecordReader match the given key, register
+ them with the JoinCollector provided.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Additional operations required of a RecordReader to participate in a join.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputFormat -->
+ <class name="CompositeInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="CompositeInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Interpret a given string as a composite expression.
+ {@code
+ func ::= <ident>([<func>,]*<func>)
+ func ::= tbl(<class>,"<path>")
+ class ::= @see java.lang.Class#forName(java.lang.String)
+ path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
+ }
+ Reads expression from the <tt>mapred.join.expr</tt> property and
+ user-supplied join types from <tt>mapred.join.define.&lt;ident&gt;</tt>
+ types. Paths supplied to <tt>tbl</tt> are given as input paths to the
+ InputFormat class listed.
+ @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]>
+ </doc>
+ </method>
+ <method name="addDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds the default set of identifiers to the parser.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a CompositeInputSplit from the child InputFormats by assigning the
+ ith split from each child to the ith composite split.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a CompositeRecordReader for the children of this InputFormat
+ as defined in the init expression.
+ The outermost join need only be composable, not necessarily a composite.
+ Mandating TupleWritable isn't strictly correct.]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given InputFormat class (inf), path (p) return:
+ {@code tbl(<inf>, <p>) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), InputFormat class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), InputFormat class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat capable of performing joins over a set of data sources sorted
+ and partitioned the same way.
+ @see #setFormat
+
+ A user may define new join types by setting the property
+ <tt>mapred.join.define.&lt;ident&gt;</tt> to a classname. In the expression
+ <tt>mapred.join.expr</tt>, the identifier will be assumed to be a
+ ComposableRecordReader.
+ <tt>mapred.join.keycomparator</tt> can be a classname used to compare keys
+ in the join.
+ @see JoinRecordReader
+ @see MultiFilterRecordReader]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputFormat -->
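+ <!-- A hedged sketch of wiring a map-side inner join with the compose
+      method and property names shown above. "inner" is assumed to be
+      among the default identifiers installed by addDefaults, and the
+      input paths are hypothetical; both sources must be sorted and
+      partitioned the same way.
+ 
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.SequenceFileInputFormat;
+      import org.apache.hadoop.mapred.join.CompositeInputFormat;
+ 
+      class JoinConfSketch {
+        static void configure(JobConf conf) {
+          conf.setInputFormat(CompositeInputFormat.class);
+          conf.set("mapred.join.expr",
+              CompositeInputFormat.compose("inner", SequenceFileInputFormat.class,
+                  new Path[] { new Path("a"), new Path("b") }));
+          // the mapper then receives a TupleWritable with one slot per source
+        }
+      }
+ -->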
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputSplit -->
+ <class name="CompositeInputSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="CompositeInputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CompositeInputSplit" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.mapred.InputSplit"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an InputSplit to this collection.
+ @throws IOException If capacity was not specified during construction
+ or if capacity has been reached.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get the ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the aggregate length of all child InputSplits currently added.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the length of the ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Collect a set of hosts from all child InputSplits.]]>
+ </doc>
+ </method>
+ <method name="getLocation" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locations of the ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write splits in the following format.
+ {@code
+ <count><class1><class2>...<classn><split1><split2>...<splitn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}
+ @throws IOException If the child InputSplit cannot be read, typically
+ for failing access checks.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This InputSplit contains a set of child InputSplits. Any InputSplit inserted
+ into this collection must have a public default constructor.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputSplit -->
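+ <!-- A short sketch of the collection behavior documented above: the
+      capacity is fixed at construction, add() enforces it, and
+      getLength() aggregates the children. The child splits are assumed
+      to come from elsewhere.
+ 
+      import java.io.IOException;
+      import org.apache.hadoop.mapred.InputSplit;
+      import org.apache.hadoop.mapred.join.CompositeInputSplit;
+ 
+      class SplitSketch {
+        static long total(InputSplit childA, InputSplit childB) throws IOException {
+          CompositeInputSplit split = new CompositeInputSplit(2);  // capacity 2
+          split.add(childA);   // throws once capacity is exhausted
+          split.add(childB);
+          return split.getLength();
+        }
+      }
+ -->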
+ <!-- start class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <class name="CompositeRecordReader" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="CompositeRecordReader" type="int, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a RecordReader with <tt>capacity</tt> children to position
+ <tt>id</tt> in the parent reader.
+ The id of a root CompositeRecordReader is -1 by convention, but relying
+ on this is not recommended.]]>
+ </doc>
+ </constructor>
+ <method name="combine" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ </method>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordReaderQueue" return="java.util.PriorityQueue&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return sorted list of RecordReaders for this composite.]]>
+ </doc>
+ </method>
+ <method name="getComparator" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return comparator defining the ordering for RecordReaders in this
+ composite.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rr" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ? extends V&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a RecordReader to this collection.
+ The id() of a RecordReader determines where in the Tuple its
+ entry will appear. Adding RecordReaders with the same id has
+ undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key for the current join or the value at the top of the
+ RecordReader heap.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the top of this RR into the given object.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if it is possible that this could emit more values.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Pass skip key to child RRs.]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Obtain an iterator over the child RRs apropos of the value type
+ ultimately emitted from this join.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[If key provided matches that of this Composite, give JoinCollector
+ iterator over values it may emit.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For all child RRs offering the key provided, obtain an iterator
+ at that position in the JoinCollector.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key of join or head of heap
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new key value common to all child RRs.
+ @throws ClassCastException if key classes differ.]]>
+ </doc>
+ </method>
+ <method name="createInternalValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a value to be used internally for joins.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unsupported (returns zero in all cases).]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all child RRs.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report progress as the minimum of all child RR progress.]]>
+ </doc>
+ </method>
+ <field name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, X&gt;.JoinCollector"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="kids" type="org.apache.hadoop.mapred.join.ComposableRecordReader[]"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A RecordReader that can effect joins of RecordReaders sharing a common key
+ type and partitioning.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <class name="InnerJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Return true iff the tuple is full (all data sources contain this key).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full inner join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <class name="JoinRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, org.apache.hadoop.io.Writable, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Emit the next set of key, value pairs as defined by the child
+ RecordReaders and operation associated with this composite RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator wrapping the JoinCollector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite joins returning Tuples of arbitrary Writables.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <class name="JoinRecordReader.JoinDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader.JoinDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Since the JoinCollector is effecting our operation, we need only
+ provide an iterator proxy wrapping its operation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+ <class name="MultiFilterRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"/>
+ <constructor name="MultiFilterRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For each tuple emitted, return a value (typically one of the values
+ in the tuple).
+ Modifying the Writables in the tuple is permitted and unlikely to affect
+ join behavior in most cases, but it is not recommended. It's safer to
+ clone first.]]>
+ </doc>
+ </method>
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Default implementation offers {@link #emit} every Tuple from the
+ collector (the outer join of child RRs).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator returning a single value from the tuple.
+ @see MultiFilterDelegationIterator]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite joins returning values derived from multiple
+ sources, but generally not tuples.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
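+ <!-- A minimal usage sketch (not from the Hadoop sources; names are
+      hypothetical): extending MultiFilterRecordReader only requires
+      supplying emit(TupleWritable), here returning the first populated
+      value of each joined tuple.
+
+      import java.io.IOException;
+      import org.apache.hadoop.io.Writable;
+      import org.apache.hadoop.io.WritableComparable;
+      import org.apache.hadoop.io.WritableComparator;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.join.MultiFilterRecordReader;
+      import org.apache.hadoop.mapred.join.TupleWritable;
+
+      public class FirstValueRecordReader<K extends WritableComparable,
+          V extends Writable> extends MultiFilterRecordReader<K, V> {
+        public FirstValueRecordReader(int id, JobConf conf, int capacity,
+            Class<? extends WritableComparator> cmpcl) throws IOException {
+          super(id, conf, capacity, cmpcl);
+        }
+        @SuppressWarnings("unchecked")
+        protected V emit(TupleWritable dst) throws IOException {
+          // Return the first populated value in the tuple.
+          return (V) dst.iterator().next();
+        }
+      }
+ -->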
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <class name="MultiFilterRecordReader.MultiFilterDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"/>
+ <constructor name="MultiFilterRecordReader.MultiFilterDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy the JoinCollector, but include a callback to emit.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <class name="OuterJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit everything from the collector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full outer join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
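+ <!-- A minimal configuration sketch (paths are hypothetical): compose()
+      builds a full outer join expression over two SequenceFile sources
+      for the join framework to parse.
+
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.SequenceFileInputFormat;
+      import org.apache.hadoop.mapred.join.CompositeInputFormat;
+
+      JobConf conf = new JobConf();
+      conf.setInputFormat(CompositeInputFormat.class);
+      conf.set("mapred.join.expr", CompositeInputFormat.compose(
+          "outer", SequenceFileInputFormat.class,
+          new Path("/data/a"), new Path("/data/b")));
+ -->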
+ <!-- start class org.apache.hadoop.mapred.join.OverrideRecordReader -->
+ <class name="OverrideRecordReader" extends="org.apache.hadoop.mapred.join.MultiFilterRecordReader&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit the value with the highest position in the tuple.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instead of filling the JoinCollector with iterators from all
+ data sources, fill only the rightmost for this key.
+ This not only saves space by discarding the other sources, but
+ it also emits the number of key-value pairs in the preferred
+ RecordReader instead of repeating that stream n times, where
+ n is the cardinality of the cross product of the discarded
+ streams for the given key.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Prefer the &quot;rightmost&quot; data source for this key.
+ For example, <tt>override(S1,S2,S3)</tt> will prefer values
+ from S3 over S2, and values from S2 over S1 for all keys
+ emitted from all sources.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OverrideRecordReader -->
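+ <!-- A configuration sketch mirroring the override(S1,S2,S3) example above
+      (paths are hypothetical): values from /data/s3 take precedence over
+      /data/s2, which take precedence over /data/s1.
+
+      conf.set("mapred.join.expr", CompositeInputFormat.compose(
+          "override", SequenceFileInputFormat.class,
+          new Path("/data/s1"), new Path("/data/s2"), new Path("/data/s3")));
+ -->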
+ <!-- start class org.apache.hadoop.mapred.join.Parser -->
+ <class name="Parser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Very simple shift-reduce parser for join expressions.
+
+ This should be sufficient for the user extension permitted now, but ought to
+ be replaced with a parser generator if more complex grammars are supported.
+ In particular, this &quot;shift-reduce&quot; parser has no states. Each set
+ of formals requires a different internal node type, which is responsible for
+ interpreting the list of tokens it receives. This is sufficient for the
+ current grammar, but it has several annoying properties that might inhibit
+ extension. In particular, parentheses are always function calls; an
+ algebraic or filter grammar would not only require a new node type, but
+ would also have to work around the internals of this parser.
+
+ For most other cases, adding classes to the hierarchy, particularly by
+ extending JoinRecordReader and MultiFilterRecordReader, is fairly
+ straightforward. One need only override the relevant method(s) (usually only
+ {@link CompositeRecordReader#combine}) and include a property to map its
+ value to an identifier in the parser.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser -->
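+ <!-- For reference, a join expression in the grammar this parser accepts,
+      as it might be given in the mapred.join.expr property (host and paths
+      are hypothetical):
+
+      inner(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+                "hdfs://host:8020/foo/bar"),
+            tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
+                "hdfs://host:8020/foo/baz"))
+ -->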
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Node -->
+ <class name="Parser.Node" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat"/>
+ <constructor name="Parser.Node" type="java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addIdentifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="ident" type="java.lang.String"/>
+ <param name="mcstrSig" type="java.lang.Class[]"/>
+ <param name="nodetype" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.Parser.Node&gt;"/>
+ <param name="cl" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;"/>
+ <exception name="NoSuchMethodException" type="java.lang.NoSuchMethodException"/>
+ <doc>
+ <![CDATA[For a given identifier, add a mapping to the nodetype for the parse
+ tree and to the ComposableRecordReader to be created, including the
+ formals required to invoke the constructor.
+ The nodetype and constructor signature should be filled in from the
+ child node.]]>
+ </doc>
+ </method>
+ <method name="setID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="int"/>
+ </method>
+ <method name="setKeyComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"/>
+ </method>
+ <field name="rrCstrMap" type="java.util.Map&lt;java.lang.String, java.lang.reflect.Constructor&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;&gt;"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ident" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Node -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <class name="Parser.NodeToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <class name="Parser.NumToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.NumToken" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <class name="Parser.StrToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.StrToken" type="org.apache.hadoop.mapred.join.Parser.TType, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Token -->
+ <class name="Parser.Token" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getType" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Tagged-union type for tokens from the join expression.
+ @see Parser.TType]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Token -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.TType -->
+ <class name="Parser.TType" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.join.Parser.TType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.join.Parser.TType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.TType -->
+ <!-- start interface org.apache.hadoop.mapred.join.ResetableIterator -->
+ <interface name="ResetableIterator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True if a call to next may return a value. False positives are
+ permitted, but false negatives are not.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign next value to actual.
+ It is required that elements added to a ResetableIterator be returned in
+ the same order after a call to {@link #reset} (FIFO).
+
+ Note that a call to this may fail for nested joins (i.e. more elements may
+ be available, but none satisfying the constraints of the join).]]>
+ </doc>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign last value returned to actual.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set iterator to return to the start of its range. Must be called after
+ calling {@link #add} to avoid a ConcurrentModificationException.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an element to the collection of elements to iterate over.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close datasources and release resources. Calling methods on the iterator
+ after calling close has undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Close datasources, but do not release internal resources. Calling this
+ method should permit the object to be reused with a different datasource.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This defines an interface to a stateful Iterator that can replay elements
+ added to it directly.
+ Note that this does not extend {@link java.util.Iterator}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ResetableIterator -->
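+ <!-- A sketch of the replay contract, using StreamBackedIterator as a
+      concrete implementation (values are hypothetical; IOException
+      handling omitted): replay(b) re-reads the last value returned by
+      next(a) without advancing the iterator.
+
+      ResetableIterator<Text> it = new StreamBackedIterator<Text>();
+      it.add(new Text("x"));
+      it.reset();
+      Text a = new Text();
+      Text b = new Text();
+      it.next(a);    // a now holds "x"; the iterator advances
+      it.replay(b);  // b also holds "x"; no advance
+      it.close();
+ -->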
+ <!-- start class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <class name="ResetableIterator.EMPTY" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;U&gt;"/>
+ <constructor name="ResetableIterator.EMPTY"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <!-- start class org.apache.hadoop.mapred.join.StreamBackedIterator -->
+ <class name="StreamBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="StreamBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. This
+ implementation uses a byte array to store elements added to it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.StreamBackedIterator -->
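+ <!-- A minimal FIFO sketch (IOException handling omitted): elements are
+      returned in insertion order after reset(). Since hasNext() may report
+      a false positive, the return value of next() is checked as well.
+
+      StreamBackedIterator<Text> it = new StreamBackedIterator<Text>();
+      it.add(new Text("a"));
+      it.add(new Text("b"));
+      it.reset();
+      Text val = new Text();
+      while (it.hasNext() && it.next(val)) {
+        System.out.println(val);  // prints a, then b
+      }
+      it.close();
+ -->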
+ <!-- start class org.apache.hadoop.mapred.join.TupleWritable -->
+ <class name="TupleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="TupleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty tuple with no allocated storage for writables.]]>
+ </doc>
+ </constructor>
+ <constructor name="TupleWritable" type="org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Initialize the tuple with storage; it is unknown whether any of the
+ writables contain &quot;written&quot; values.]]>
+ </doc>
+ </constructor>
+ <method name="has" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Return true if tuple has an element at the position provided.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get ith Writable from Tuple.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of children in this Tuple.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator over the elements in this tuple.
+ Note that this doesn't flatten the tuple; one may receive tuples
+ from this iterator.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert Tuple to String as in the following.
+ <tt>[<child1>,<child2>,...,<childn>]</tt>]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes each Writable to <code>out</code>.
+ TupleWritable format:
+ {@code
+ <count><type1><type2>...<typen><obj1><obj2>...<objn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.
+
+ This is *not* a general-purpose tuple type. In almost all cases, users are
+ encouraged to implement their own serializable types, which can perform
+ better validation and provide more efficient encodings than this class is
+ capable of. TupleWritable relies on the join framework for type safety and
+ assumes its instances will rarely be persisted; both assumptions are not
+ merely incompatible with the general case, but contrary to it.
+
+ @see org.apache.hadoop.io.Writable]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.TupleWritable -->
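+ <!-- A sketch of consuming join output in a Mapper (types are assumed;
+      positions absent for a given key are skipped via has(i)):
+
+      public void map(Text key, TupleWritable val,
+          OutputCollector<Text, Text> output, Reporter reporter)
+          throws IOException {
+        for (int i = 0; i < val.size(); ++i) {
+          if (val.has(i)) {
+            output.collect(key, new Text(val.get(i).toString()));
+          }
+        }
+      }
+ -->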
+ <!-- start class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+ <class name="WrappedRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, U&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key at the head of this RR.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RR into the object supplied.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if the RR, including the k,v pair stored in this
+ object, is not yet exhausted.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next k,v pair into the head of this object; return true iff
+ the RR and this are not exhausted.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an iterator to the collector at the position occupied by this
+ RecordReader over the values in this stream paired with the key
+ provided (i.e. register a stream of values from this source matching K
+ with a collector).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write key-value pair at the head of this stream to the objects provided;
+ get next key-value pair from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new key from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="U extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new value from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request progress from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request position from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Forward close request to proxied RR.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key at head of proxied RR
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true iff compareTo(other) returns 0.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy class for a RecordReader participating in the join framework.
+ This class keeps track of the &quot;head&quot; key-value pair for the
+ provided RecordReader and keeps a store of values matching a key when
+ this source is participating in a join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+</package>
+<package name="org.apache.hadoop.mapred.lib">
+ <!-- start class org.apache.hadoop.mapred.lib.ChainMapper -->
+ <class name="ChainMapper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper"/>
+ <constructor name="ChainMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="addMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="klass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&lt;K1, V1, K2, V2&gt;&gt;"/>
+ <param name="inputKeyClass" type="java.lang.Class&lt;? extends K1&gt;"/>
+ <param name="inputValueClass" type="java.lang.Class&lt;? extends V1&gt;"/>
+ <param name="outputKeyClass" type="java.lang.Class&lt;? extends K2&gt;"/>
+ <param name="outputValueClass" type="java.lang.Class&lt;? extends V2&gt;"/>
+ <param name="byValue" type="boolean"/>
+ <param name="mapperConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Adds a Mapper class to the chain job's JobConf.
+ <p/>
+ It has to be specified how keys and values are passed from one element of
+ the chain to the next: by value or by reference. If a Mapper relies on the
+ assumption that the key and values are not modified by the collector,
+ 'by value' must be used. If the Mapper does not rely on these semantics,
+ 'by reference' can be used as an optimization to avoid serialization and
+ deserialization.
+ <p/>
+ For the added Mapper, the configuration given for it,
+ <code>mapperConf</code>, has precedence over the job's JobConf. This
+ precedence is in effect while the task is running.
+ <p/>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper; this is done by the addMapper for the last mapper in the chain.
+ <p/>
+
+ @param job job's JobConf to add the Mapper class.
+ @param klass the Mapper class to add.
+ @param inputKeyClass mapper input key class.
+ @param inputValueClass mapper input value class.
+ @param outputKeyClass mapper output key class.
+ @param outputValueClass mapper output value class.
+ @param byValue indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param mapperConf a JobConf with the configuration for the Mapper
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configures the ChainMapper and all the Mappers in the chain.
+ <p/>
+ If this method is overridden, <code>super.configure(...)</code> should be
+ invoked at the beginning of the overriding method.]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Chains the <code>map(...)</code> methods of the Mappers in the chain.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the ChainMapper and all the Mappers in the chain.
+ <p/>
+ If this method is overridden, <code>super.close()</code> should be
+ invoked at the end of the overriding method.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The ChainMapper class allows the use of multiple Mapper classes within a
+ single Map task.
+ <p/>
+ The Mapper classes are invoked in a chained (or piped) fashion: the output of
+ the first becomes the input of the second, and so on until the last Mapper,
+ whose output is written to the task's output.
+ <p/>
+ The key functionality of this feature is that the Mappers in the chain do not
+ need to be aware that they are executed in a chain. This enables having
+ reusable specialized Mappers that can be combined to perform composite
+ operations within a single task.
+ <p/>
+ Special care has to be taken when creating chains so that the key/values
+ output by a Mapper are valid for the following Mapper in the chain. It is
+ assumed that all Mappers and the Reducer in the chain use matching output and
+ input key and value classes, as no conversion is done by the chaining code.
+ <p/>
+ Using the ChainMapper and the ChainReducer classes it is possible to compose
+ Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
+ immediate benefit of this pattern is a dramatic reduction in disk IO.
+ <p/>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper; this is done by the addMapper for the last mapper in the chain.
+ <p/>
+ ChainMapper usage pattern:
+ <p/>
+ <pre>
+ ...
+ conf.setJobName("chain");
+ conf.setInputFormat(TextInputFormat.class);
+ conf.setOutputFormat(TextOutputFormat.class);
+
+ JobConf mapAConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
+ Text.class, Text.class, true, mapAConf);
+
+ JobConf mapBConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
+ LongWritable.class, Text.class, false, mapBConf);
+
+ JobConf reduceConf = new JobConf(false);
+ ...
+ ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
+ Text.class, Text.class, true, reduceConf);
+
+ ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
+ LongWritable.class, Text.class, false, null);
+
+ ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
+ LongWritable.class, LongWritable.class, true, null);
+
+ FileInputFormat.setInputPaths(conf, inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+ ...
+
+ JobClient jc = new JobClient(conf);
+ RunningJob job = jc.submitJob(conf);
+ ...
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.ChainMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.ChainReducer -->
+ <class name="ChainReducer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer"/>
+ <constructor name="ChainReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="setReducer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="klass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&lt;K1, V1, K2, V2&gt;&gt;"/>
+ <param name="inputKeyClass" type="java.lang.Class&lt;? extends K1&gt;"/>
+ <param name="inputValueClass" type="java.lang.Class&lt;? extends V1&gt;"/>
+ <param name="outputKeyClass" type="java.lang.Class&lt;? extends K2&gt;"/>
+ <param name="outputValueClass" type="java.lang.Class&lt;? extends V2&gt;"/>
+ <param name="byValue" type="boolean"/>
+ <param name="reducerConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Sets the Reducer class to the chain job's JobConf.
+ <p/>
+ It has to be specified how keys and values are passed from one element of
+ the chain to the next: by value or by reference. If a Reducer relies on the
+ assumption that the key and values are not modified by the collector,
+ 'by value' must be used. If the Reducer does not rely on these semantics,
+ 'by reference' can be used as an optimization to avoid serialization and
+ deserialization.
+ <p/>
+ For the added Reducer, the configuration given for it,
+ <code>reducerConf</code>, has precedence over the job's JobConf. This
+ precedence is in effect while the task is running.
+ <p/>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainReducer; this is done by the setReducer or the addMapper for the last
+ element in the chain.
+
+ @param job job's JobConf to add the Reducer class.
+ @param klass the Reducer class to add.
+ @param inputKeyClass reducer input key class.
+ @param inputValueClass reducer input value class.
+ @param outputKeyClass reducer output key class.
+ @param outputValueClass reducer output value class.
+ @param byValue indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param reducerConf a JobConf with the configuration for the Reducer
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+ </doc>
+ </method>
+ <method name="addMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="klass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&lt;K1, V1, K2, V2&gt;&gt;"/>
+ <param name="inputKeyClass" type="java.lang.Class&lt;? extends K1&gt;"/>
+ <param name="inputValueClass" type="java.lang.Class&lt;? extends V1&gt;"/>
+ <param name="outputKeyClass" type="java.lang.Class&lt;? extends K2&gt;"/>
+ <param name="outputValueClass" type="java.lang.Class&lt;? extends V2&gt;"/>
+ <param name="byValue" type="boolean"/>
+ <param name="mapperConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Adds a Mapper class to the chain job's JobConf.
+ <p/>
+ It has to be specified how keys and values are passed from one element of
+ the chain to the next: by value or by reference. If a Mapper relies on the
+ assumption that the key and values are not modified by the collector,
+ 'by value' must be used. If the Mapper does not rely on these semantics,
+ 'by reference' can be used as an optimization to avoid serialization and
+ deserialization.
+ <p/>
+ For the added Mapper, the configuration given for it,
+ <code>mapperConf</code>, has precedence over the job's JobConf. This
+ precedence is in effect while the task is running.
+ <p/>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper; this is done by the addMapper for the last mapper in the
+ chain.
+
+ @param job chain job's JobConf to add the Mapper class.
+ @param klass the Mapper class to add.
+ @param inputKeyClass mapper input key class.
+ @param inputValueClass mapper input value class.
+ @param outputKeyClass mapper output key class.
+ @param outputValueClass mapper output value class.
+ @param byValue indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param mapperConf a JobConf with the configuration for the Mapper
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configures the ChainReducer, the Reducer and all the Mappers in the chain.
+ <p/>
+ If this method is overridden, <code>super.configure(...)</code> should be
+ invoked at the beginning of the overriding method.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="values" type="java.util.Iterator"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Chains the <code>reduce(...)</code> method of the Reducer with the
+ <code>map(...) </code> methods of the Mappers in the chain.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the ChainReducer, the Reducer and all the Mappers in the chain.
+ <p/>
+ If this method is overridden, <code>super.close()</code> should be
+ invoked at the end of the overriding method.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The ChainReducer class allows chaining multiple Mapper classes after a
+ Reducer within the Reducer task.
+ <p/>
+ For each record output by the Reducer, the Mapper classes are invoked in a
+ chained (or piped) fashion: the output of the first becomes the input of the
+ second, and so on until the last Mapper, whose output is written to the
+ task's output.
+ <p/>
+ The key functionality of this feature is that the Mappers in the chain do not
+ need to be aware that they are executed after the Reducer or in a chain.
+ This enables having reusable specialized Mappers that can be combined to
+ perform composite operations within a single task.
+ <p/>
+ Special care has to be taken when creating chains so that the key/values
+ output by a Mapper are valid for the following Mapper in the chain. It is
+ assumed that all Mappers and the Reducer in the chain use matching output and
+ input key and value classes, as no conversion is done by the chaining code.
+ <p/>
+ Using the ChainMapper and the ChainReducer classes it is possible to compose
+ Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
+ immediate benefit of this pattern is a dramatic reduction in disk IO.
+ <p/>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainReducer; this is done by the setReducer or the addMapper for the last
+ element in the chain.
+ <p/>
+ ChainReducer usage pattern:
+ <p/>
+ <pre>
+ ...
+ conf.setJobName("chain");
+ conf.setInputFormat(TextInputFormat.class);
+ conf.setOutputFormat(TextOutputFormat.class);
+
+ JobConf mapAConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
+ Text.class, Text.class, true, mapAConf);
+
+ JobConf mapBConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
+ LongWritable.class, Text.class, false, mapBConf);
+
+ JobConf reduceConf = new JobConf(false);
+ ...
+ ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
+ Text.class, Text.class, true, reduceConf);
+
+ ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
+ LongWritable.class, Text.class, false, null);
+
+ ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
+ LongWritable.class, LongWritable.class, true, null);
+
+ FileInputFormat.setInputPaths(conf, inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+ ...
+
+ JobClient jc = new JobClient(conf);
+ RunningJob job = jc.submitJob(conf);
+ ...
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.ChainReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.DelegatingInputFormat -->
+ <class name="DelegatingInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <constructor name="DelegatingInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} that delegates behaviour of paths to multiple other
+ InputFormats.
+
+ @see MultipleInputs#addInputPath(JobConf, Path, Class, Class)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.DelegatingInputFormat -->
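+ <!-- DelegatingInputFormat is normally installed indirectly through
+      MultipleInputs; a sketch with hypothetical paths and mapper classes:
+
+      MultipleInputs.addInputPath(conf, new Path("/logs/plain"),
+          TextInputFormat.class, PlainMap.class);
+      MultipleInputs.addInputPath(conf, new Path("/logs/seq"),
+          SequenceFileInputFormat.class, SeqMap.class);
+ -->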
+ <!-- start class org.apache.hadoop.mapred.lib.DelegatingMapper -->
+ <class name="DelegatingMapper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="DelegatingMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1"/>
+ <param name="value" type="V1"/>
+ <param name="outputCollector" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that delegates behaviour of paths to multiple other
+ mappers.
+
+ @see MultipleInputs#addInputPath(JobConf, Path, Class, Class)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.DelegatingMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
+ <class name="FieldSelectionMapReduce" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="FieldSelectionMapReduce"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The identity function. The input key/value pair is written directly to the output.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements a mapper/reducer class that can be used to perform
+ field selections in a manner similar to unix cut. The input data is treated
+ as fields separated by a user specified separator (the default value is
+ "\t"). The user can specify a list of fields that form the map output keys,
+ and a list of fields that form the map output values. If the input format is
+ TextInputFormat, the mapper ignores the key to the map function, and the
+ fields are taken from the value only. Otherwise, the fields are the union of
+ those from the key and those from the value.
+
+ The field separator is under attribute "mapred.data.field.separator".
+
+ The map output field list spec is under attribute "map.output.key.value.fields.spec".
+ The value is expected to be like "keyFieldsSpec:valueFieldsSpec"
+ key/valueFieldsSpec are comma (,) separated field spec: fieldSpec,fieldSpec,fieldSpec ...
+ Each field spec can be a simple number (e.g. 5) specifying a specific field, or a range
+ (like 2-5) to specify a range of fields, or an open range (like 3-) specifying all
+ the fields starting from field 3. The open range field spec applies to value
+ fields only; it has no effect on the key fields.
+
+ Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields 4,3,0 and 1 for keys,
+ and use fields 6,5,1,2,3,7 and above for values.
+
+ The reduce output field list spec is under attribute "reduce.output.key.value.fields.spec".
+
+ The reducer extracts output key/value pairs in a similar manner, except that
+ the key is never ignored.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
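+ <!-- A configuration sketch using the field specs documented above (the
+      separator and specs are illustrative values, not defaults):
+
+      JobConf conf = new JobConf();
+      conf.set("mapred.data.field.separator", ",");
+      conf.set("map.output.key.value.fields.spec", "4,3,0,1:6,5,1-3,7-");
+      conf.set("reduce.output.key.value.fields.spec", "0:1-");
+      conf.setMapperClass(FieldSelectionMapReduce.class);
+      conf.setReducerClass(FieldSelectionMapReduce.class);
+ -->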
+ <!-- start class org.apache.hadoop.mapred.lib.HashPartitioner -->
+ <class name="HashPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="HashPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partition keys by their {@link Object#hashCode()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.HashPartitioner -->
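+ <!-- For reference, a sketch of the partition computation described above
+ (an assumed equivalent form; masking the sign bit keeps the result non-negative):
+
+ int partition = (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
+ -->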
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <class name="IdentityMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The identity function. Input key/value pairs are written directly to
+ the output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implements the identity function, mapping inputs directly to outputs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <class name="IdentityReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;V&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes all keys and values directly to output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Performs no reduction, writing all input values directly to the output.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityReducer -->
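+ <!-- A short sketch of where the identity classes fit (illustrative; these are
+ also the JobConf defaults, so setting them explicitly is optional):
+
+ JobConf conf = new JobConf();
+ conf.setMapperClass(IdentityMapper.class);   // pass key/value pairs through the map phase
+ conf.setReducerClass(IdentityReducer.class); // emit all grouped values unchanged
+ -->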
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler -->
+ <class name="InputSampler" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="InputSampler" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="writePartitionFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="sampler" type="org.apache.hadoop.mapred.lib.InputSampler.Sampler&lt;K, V&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a partition file for the given job, using the Sampler provided.
+ Queries the sampler for a sample keyset, sorts by the output key
+ comparator, selects the keys for each rank, and writes to the destination
+ returned from {@link
+ org.apache.hadoop.mapred.lib.TotalOrderPartitioner#getPartitionFile}.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Driver for InputSampler from the command line.
+ Configures a JobConf instance and calls {@link #writePartitionFile}.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Utility for collecting samples and writing a partition file for
+ {@link org.apache.hadoop.mapred.lib.TotalOrderPartitioner}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler -->
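+ <!-- A sketch of the sampling flow described above, pairing InputSampler with
+ TotalOrderPartitioner (documented later in this package); the frequency,
+ sample count and split limit values are assumptions:
+
+ JobConf conf = new JobConf();
+ conf.setPartitionerClass(TotalOrderPartitioner.class);
+ // Sample keys with probability 0.1, up to 10000 samples from at most 10
+ // splits, then write the partition boundaries for the reducers.
+ InputSampler.Sampler<Text, Text> sampler =
+     new InputSampler.RandomSampler<Text, Text>(0.1, 10000, 10);
+ InputSampler.writePartitionFile(conf, sampler);
+ -->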
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler -->
+ <class name="InputSampler.IntervalSampler" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.InputSampler.Sampler&lt;K, V&gt;"/>
+ <constructor name="InputSampler.IntervalSampler" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new IntervalSampler sampling <em>all</em> splits.
+ @param freq The frequency with which records will be emitted.]]>
+ </doc>
+ </constructor>
+ <constructor name="InputSampler.IntervalSampler" type="double, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new IntervalSampler.
+ @param freq The frequency with which records will be emitted.
+ @param maxSplitsSampled The maximum number of splits to examine.
+ @see #getSample]]>
+ </doc>
+ </constructor>
+ <method name="getSample" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For each split sampled, emit when the ratio of the number of records
+ retained to the total record count is less than the specified
+ frequency.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sample from s splits at regular intervals.
+ Useful for sorted data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler -->
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler.RandomSampler -->
+ <class name="InputSampler.RandomSampler" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.InputSampler.Sampler&lt;K, V&gt;"/>
+ <constructor name="InputSampler.RandomSampler" type="double, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new RandomSampler sampling <em>all</em> splits.
+ This will read every split at the client, which is very expensive.
+ @param freq Probability with which a key will be chosen.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.]]>
+ </doc>
+ </constructor>
+ <constructor name="InputSampler.RandomSampler" type="double, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new RandomSampler.
+ @param freq Probability with which a key will be chosen.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.
+ @param maxSplitsSampled The maximum number of splits to examine.]]>
+ </doc>
+ </constructor>
+ <method name="getSample" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Randomize the split order, then take the specified number of keys from
+ each split sampled, where each key is selected with the specified
+ probability and possibly replaced by a subsequently selected key when
+ the quota of keys from that split is satisfied.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sample from random points in the input.
+ General-purpose sampler. Takes numSamples / maxSplitsSampled inputs from
+ each split.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler.RandomSampler -->
+ <!-- start interface org.apache.hadoop.mapred.lib.InputSampler.Sampler -->
+ <interface name="InputSampler.Sampler" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getSample" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For a given job, collect and return a subset of the keys from the
+ input data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface to sample using an {@link org.apache.hadoop.mapred.InputFormat}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.InputSampler.Sampler -->
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler.SplitSampler -->
+ <class name="InputSampler.SplitSampler" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.InputSampler.Sampler&lt;K, V&gt;"/>
+ <constructor name="InputSampler.SplitSampler" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a SplitSampler sampling <em>all</em> splits.
+ Takes the first numSamples / numSplits records from each split.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.]]>
+ </doc>
+ </constructor>
+ <constructor name="InputSampler.SplitSampler" type="int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new SplitSampler.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.
+ @param maxSplitsSampled The maximum number of splits to examine.]]>
+ </doc>
+ </constructor>
+ <method name="getSample" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[From each split sampled, take the first numSamples / numSplits records.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Samples the first n records from s splits.
+ Inexpensive way to sample random data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler.SplitSampler -->
+ <!-- start class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <class name="InverseMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, V, K&gt;"/>
+ <constructor name="InverseMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;V, K&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The inverse function. Input keys and values are swapped.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that swaps keys and values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedComparator -->
+ <class name="KeyFieldBasedComparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="KeyFieldBasedComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[This comparator implementation provides a subset of the features provided
+ by Unix/GNU sort. In particular, the supported features are:
+ -n (sort numerically)
+ -r (reverse the result of the comparison)
+ -k pos1[,pos2], where pos is of the form f[.c][opts]: f is the number
+ of the field to use, and c is the number of the first character from the
+ beginning of the field. Fields and character positions are numbered starting
+ with 1; a character position of zero in pos2 indicates the field's last
+ character. If '.c' is omitted from pos1, it defaults to 1 (the beginning
+ of the field); if omitted from pos2, it defaults to 0 (the end of the
+ field). opts are ordering options (any of 'nr' as described above).
+ The fields in the key are assumed to be separated by
+ map.output.key.field.separator.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedComparator -->
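+ <!-- A configuration sketch for the comparator above. The separator property is
+ named in this record; the options property name
+ ("mapred.text.key.comparator.options") is an assumption:
+
+ JobConf conf = new JobConf();
+ conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
+ conf.set("map.output.key.field.separator", "\t");
+ // Sort on field 2 only, numerically, in reverse order.
+ conf.set("mapred.text.key.comparator.options", "-k2,2nr");
+ -->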
+ <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+ <class name="KeyFieldBasedPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="KeyFieldBasedPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <param name="currentHash" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[Defines a way to partition keys based on certain key fields (also see
+ {@link KeyFieldBasedComparator}).
+ The key specification supported is of the form -k pos1[,pos2], where
+ pos is of the form f[.c][opts]: f is the number of the key field to use,
+ and c is the number of the first character from the beginning of the
+ field. Fields and character positions are numbered starting with 1; a
+ character position of zero in pos2 indicates the field's last character.
+ If '.c' is omitted from pos1, it defaults to 1 (the beginning of the
+ field); if omitted from pos2, it defaults to 0 (the end of the field).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
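+ <!-- A companion sketch for the partitioner above; the options property name
+ ("mapred.text.key.partitioner.options") is an assumption here:
+
+ JobConf conf = new JobConf();
+ conf.setPartitionerClass(KeyFieldBasedPartitioner.class);
+ // Partition on the first two key fields so they land on the same reducer.
+ conf.set("mapred.text.key.partitioner.options", "-k1,2");
+ -->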
+ <!-- start class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <class name="LongSumReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, org.apache.hadoop.io.LongWritable, K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="LongSumReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Reducer} that sums long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleInputs -->
+ <class name="MultipleInputs" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleInputs"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inputFormatClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <doc>
+ <![CDATA[Add a {@link Path} with a custom {@link InputFormat} to the list of
+ inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for the job
+ @param inputFormatClass {@link InputFormat} class to use for this path]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inputFormatClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="mapperClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"/>
+ <doc>
+ <![CDATA[Add a {@link Path} with a custom {@link InputFormat} and
+ {@link Mapper} to the list of inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for the job
+ @param inputFormatClass {@link InputFormat} class to use for this path
+ @param mapperClass {@link Mapper} class to use for this path]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class supports MapReduce jobs that have multiple input paths with
+ a different {@link InputFormat} and {@link Mapper} for each path.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleInputs -->
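+ <!-- A sketch of the two addInputPath variants described above (the paths and the
+ SeqEntryMapper class are assumptions):
+
+ MultipleInputs.addInputPath(conf, new Path("/data/text"),
+     TextInputFormat.class);
+ MultipleInputs.addInputPath(conf, new Path("/data/seq"),
+     SequenceFileInputFormat.class, SeqEntryMapper.class); // assumed user class
+ -->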
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+ <class name="MultipleOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a composite record writer that can write key/value data to different
+ output files.
+
+ @param fs
+ the file system to use
+ @param job
+ the job conf for the job
+ @param name
+ the leaf file name for the output file (such as "part-00000")
+ @param arg3
+ a progressable for reporting progress.
+ @return a composite record writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="generateLeafFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the leaf name for the output file name. The default behavior does
+ not change the leaf file name (such as part-00000)
+
+ @param name
+ the leaf file name for the output file
+ @return the given leaf file name]]>
+ </doc>
+ </method>
+ <method name="generateFileNameForKeyValue" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the output file name based on the given key and the leaf file
+ name. The default behavior is that the file name does not depend on the
+ key.
+
+ @param key
+ the key of the output data
+ @param name
+ the leaf file name
+ @return generated file name]]>
+ </doc>
+ </method>
+ <method name="generateActualKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <doc>
+ <![CDATA[Generate the actual key from the given key/value. The default behavior is
+ that the actual key is equal to the given key.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual key derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="generateActualValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <doc>
+ <![CDATA[Generate the actual value from the given key and value. The default
+ behavior is that the actual value is equal to the given value.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual value derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="getInputFileBasedOutputFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the output file name based on a given name and the input file
+ name. If the map input file does not exist (i.e. this is not a map-only
+ job), the given name is returned unchanged. If the config value for
+ "num.of.trailing.legs.to.use" is not set, or is set to 0 or a negative
+ value, the given name is returned unchanged. Otherwise, return a file name
+ consisting of the N trailing legs of the input file name, where N is the
+ config value for "num.of.trailing.legs.to.use".
+
+ @param job
+ the job config
+ @param name
+ the output file name
+ @return the output file name based on the given name and the input file name.]]>
+ </doc>
+ </method>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param fs
+ the file system to use
+ @param job
+ a job conf object
+ @param name
+ the name of the file over which a record writer object will be
+ constructed
+ @param arg3
+ a progressable object
+ @return A RecordWriter object over the given file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This abstract class extends the FileOutputFormat, allowing the output data
+ to be written to different output files. There are three basic use cases for
+ this class.
+
+ Case one: This class is used for a map-reduce job with at least one reducer.
+ The reducer wants to write data to different files depending on the actual
+ keys. It is assumed that a key (or value) encodes the actual key (value)
+ and the desired location for the actual key (value).
+
+ Case two: This class is used for a map-only job. The job wants to use an
+ output file name that is either a part of the input file name of the input
+ data, or some derivation of it.
+
+ Case three: This class is used for a map-only job. The job wants to use an
+ output file name that depends on both the keys and the input file name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
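+ <!-- A minimal subclass sketch for "case one" above, routing records to a file
+ named after the key (the class name and directory layout are assumptions; it
+ builds on MultipleTextOutputFormat, documented below):
+
+ public class KeyedTextOutputFormat extends MultipleTextOutputFormat<Text, Text> {
+   @Override
+   protected String generateFileNameForKeyValue(Text key, Text value, String name) {
+     // One output file per key value, e.g. "us/part-00000" for key "us".
+     return key.toString() + "/" + name;
+   }
+ }
+ -->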
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputs -->
+ <class name="MultipleOutputs" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleOutputs" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates and initializes multiple named outputs support; it should be
+ instantiated in the Mapper/Reducer configure method.
+
+ @param job the job configuration object]]>
+ </doc>
+ </constructor>
+ <method name="getNamedOutputsList" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Returns list of channel names.
+
+ @param conf job conf
+ @return List of channel Names]]>
+ </doc>
+ </method>
+ <method name="isMultiNamedOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns whether a named output is multiple.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return <code>true</code> if the named output is multi, <code>false</code>
+ if it is single. If the named output is not defined, it returns
+ <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputFormatClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the named output OutputFormat.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return namedOutput OutputFormat]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputKeyClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the key class for a named output.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return class for the named output key]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputValueClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value class for a named output.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return class of named output value]]>
+ </doc>
+ </method>
+ <method name="addNamedOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="outputFormatClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"/>
+ <param name="keyClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="valueClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Adds a named output for the job.
+ <p/>
+
+ @param conf job conf to add the named output
+ @param namedOutput named output name; it has to be a word of letters
+ and numbers only, and cannot be the word 'part', as
+ that is reserved for the
+ default output.
+ @param outputFormatClass OutputFormat class.
+ @param keyClass key class
+ @param valueClass value class]]>
+ </doc>
+ </method>
+ <method name="addMultiNamedOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="outputFormatClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"/>
+ <param name="keyClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="valueClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Adds a multi named output for the job.
+ <p/>
+
+ @param conf job conf to add the named output
+ @param namedOutput named output name; it has to be a word of letters
+ and numbers only, and cannot be the word 'part', as
+ that is reserved for the
+ default output.
+ @param outputFormatClass OutputFormat class.
+ @param keyClass key class
+ @param valueClass value class]]>
+ </doc>
+ </method>
+ <method name="setCountersEnabled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="enabled" type="boolean"/>
+ <doc>
+ <![CDATA[Enables or disables counters for the named outputs.
+ <p/>
+ By default these counters are disabled.
+ <p/>
+ The counters group is the {@link MultipleOutputs} class name.
+ <p/>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, an underscore '_' and the multiname.
+
+ @param conf job conf to enable or disable the counters for.
+ @param enabled indicates if the counters will be enabled or not.]]>
+ </doc>
+ </method>
+ <method name="getCountersEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Returns whether the counters for the named outputs are enabled or not.
+ <p/>
+ By default these counters are disabled.
+ <p/>
+ The counters group is the {@link MultipleOutputs} class name.
+ <p/>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, an underscore '_' and the multiname.
+
+ @param conf job conf to check the counters for.
+ @return TRUE if the counters are enabled, FALSE if they are disabled.]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputs" return="java.util.Iterator&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an iterator over the defined named outputs.
+
+ @return iterator over the defined named outputs]]>
+ </doc>
+ </method>
+ <method name="getCollector" return="org.apache.hadoop.mapred.OutputCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the output collector for a named output.
+ <p/>
+
+ @param namedOutput the named output name
+ @param reporter the reporter
+ @return the output collector for the given named output
+ @throws IOException thrown if output collector could not be created]]>
+ </doc>
+ </method>
+ <method name="getCollector" return="org.apache.hadoop.mapred.OutputCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="multiName" type="java.lang.String"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the output collector for a multi named output.
+ <p/>
+
+ @param namedOutput the named output name
+ @param multiName the multi name part
+ @param reporter the reporter
+ @return the output collector for the given named output
+ @throws IOException thrown if output collector could not be created]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes all the opened named outputs.
+ <p/>
+ If overridden, subclasses must invoke <code>super.close()</code> at the
+ end of their <code>close()</code> method.
+
+ @throws java.io.IOException thrown if any of the MultipleOutput files
+ could not be closed properly.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MultipleOutputs class simplifies writing to additional outputs other
+ than the job default output via the <code>OutputCollector</code> passed to
+ the <code>map()</code> and <code>reduce()</code> methods of the
+ <code>Mapper</code> and <code>Reducer</code> implementations.
+ <p/>
+ Each additional output, or named output, may be configured with its own
+ <code>OutputFormat</code>, with its own key class and with its own value
+ class.
+ <p/>
+ A named output can be a single file or a multi file. The latter is referred
+ to as a multi named output.
+ <p/>
+ A multi named output is an unbounded set of files all sharing the same
+ <code>OutputFormat</code>, key class and value class configuration.
+ <p/>
+ When named outputs are used within a <code>Mapper</code> implementation,
+ key/values written to a named output are not part of the reduce phase; only
+ key/values written to the job <code>OutputCollector</code> are part of the
+ reduce phase.
+ <p/>
+ MultipleOutputs supports counters; by default they are disabled. The
+ counters group is the {@link MultipleOutputs} class name.
+ <p/>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, an underscore '_' and the multiname.
+ <p/>
+ Job configuration usage pattern is:
+ <pre>
+
+ JobConf conf = new JobConf();
+
+ conf.setInputPath(inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+
+ conf.setMapperClass(MOMap.class);
+ conf.setReducerClass(MOReduce.class);
+ ...
+
+ // Defines additional single text based output 'text' for the job
+ MultipleOutputs.addNamedOutput(conf, "text", TextOutputFormat.class,
+ LongWritable.class, Text.class);
+
+ // Defines additional multi sequencefile based output 'sequence' for the
+ // job
+ MultipleOutputs.addMultiNamedOutput(conf, "seq",
+ SequenceFileOutputFormat.class,
+ LongWritable.class, Text.class);
+ ...
+
+ JobClient jc = new JobClient();
+ RunningJob job = jc.submitJob(conf);
+
+ ...
+ </pre>
+ <p/>
+ Usage pattern in the Reducer implementation is:
+ <pre>
+
+ public class MOReduce implements
+ Reducer&lt;WritableComparable, Writable, WritableComparable, Writable&gt; {
+ private MultipleOutputs mos;
+
+ public void configure(JobConf conf) {
+ ...
+ mos = new MultipleOutputs(conf);
+ }
+
+ public void reduce(WritableComparable key, Iterator&lt;Writable&gt; values,
+ OutputCollector output, Reporter reporter)
+ throws IOException {
+ ...
+ mos.getCollector("text", reporter).collect(key, new Text("Hello"));
+ mos.getCollector("seq", "A", reporter).collect(key, new Text("Bye"));
+ mos.getCollector("seq", "B", reporter).collect(key, new Text("Chau"));
+ ...
+ }
+
+ public void close() throws IOException {
+ mos.close();
+ ...
+ }
+
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputs -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <class name="MultipleSequenceFileOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleSequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends the MultipleOutputFormat, allowing the output data to be
+ written to different output files in SequenceFile output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <class name="MultipleTextOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleTextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends the MultipleOutputFormat, allowing the output data to be
+ written to different output files in Text output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
+ <class name="MultithreadedMapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MultithreadedMapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Multithreaded implementation of {@link org.apache.hadoop.mapred.MapRunnable}.
+ <p>
+ It can be used instead of the default implementation,
+ {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not CPU
+ bound, in order to improve throughput.
+ <p>
+ Map implementations using this MapRunnable must be thread-safe.
+ <p>
+ The Map-Reduce job has to be configured to use this MapRunnable class (using
+ the JobConf.setMapRunnerClass method), and the number of threads the
+ thread-pool can use can be set with the
+ <code>mapred.map.multithreadedrunner.threads</code> property; its default
+ value is 10 threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
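+ <!-- A configuration sketch for the runner above (the thread count of 20 is an
+ arbitrary example value):
+
+ JobConf conf = new JobConf();
+ conf.setMapRunnerClass(MultithreadedMapRunner.class);
+ conf.setInt("mapred.map.multithreadedrunner.threads", 20);
+ -->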
+ <!-- start class org.apache.hadoop.mapred.lib.NLineInputFormat -->
+ <class name="NLineInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="NLineInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically splits the set of input files for the job; each split contains
+ N lines of the input.
+
+ @see org.apache.hadoop.mapred.FileInputFormat#getSplits(JobConf, int)]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[NLineInputFormat splits N lines of input as one split.
+
+ In many "pleasantly" parallel applications, each process/mapper
+ processes the same input file(s), but the computations are
+ controlled by different parameters (referred to as "parameter sweeps").
+ One way to achieve this is to specify a set of parameters
+ (one set per line) as input in a control file
+ (which is the input path to the map-reduce application,
+ whereas the input dataset is specified
+ via a config variable in JobConf).
+
+ NLineInputFormat can be used in such applications: it splits
+ the input file such that, by default, one line is fed as
+ a value to one map task, and the key is the offset,
+ i.e. (k,v) is (LongWritable, Text).
+ The location hints will span the whole mapred cluster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NLineInputFormat -->
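+ <!-- A sketch for the parameter-sweep setup described above; the property name
+ "mapred.line.input.format.linespermap" is an assumption (not stated in this
+ record):
+
+ JobConf conf = new JobConf();
+ conf.setInputFormat(NLineInputFormat.class);
+ // Feed each map task 1 line (one parameter set) of the control file.
+ conf.setInt("mapred.line.input.format.linespermap", 1);
+ -->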
+ <!-- start class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <class name="NullOutputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="NullOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[Consume all outputs and put them in /dev/null.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <class name="RegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="RegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.RegexMapper -->
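+ <!-- A sketch of configuring RegexMapper (the property names "mapred.mapper.regex"
+ and "mapred.mapper.regex.group" are assumptions, not stated in this record):
+
+ JobConf conf = new JobConf();
+ conf.setMapperClass(RegexMapper.class);
+ conf.set("mapred.mapper.regex", "ERROR.*");  // emit text matching this pattern
+ conf.setInt("mapred.mapper.regex.group", 0); // capture group to emit (0 = whole match)
+ -->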
+ <!-- start class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+ <class name="TokenCountMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="TokenCountMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that maps text values into <token,freq> pairs. Uses
+ {@link StringTokenizer} to break text into tokens.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.TokenCountMapper -->
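+ <!-- TokenCountMapper pairs naturally with LongSumReducer (documented earlier in
+ this package) to form a word count; a minimal sketch, assuming Text/LongWritable
+ job output types:
+
+ JobConf conf = new JobConf();
+ conf.setOutputKeyClass(Text.class);
+ conf.setOutputValueClass(LongWritable.class);
+ conf.setMapperClass(TokenCountMapper.class);  // emits one <token, 1> pair per token
+ conf.setCombinerClass(LongSumReducer.class);  // pre-sum counts on the map side
+ conf.setReducerClass(LongSumReducer.class);   // final per-token totals
+ -->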
+ <!-- start class org.apache.hadoop.mapred.lib.TotalOrderPartitioner -->
+ <class name="TotalOrderPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K, V&gt;"/>
+ <constructor name="TotalOrderPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Read in the partition file and build indexing data structures.
+ If the key type is {@link org.apache.hadoop.io.BinaryComparable} and
+ <tt>total.order.partitioner.natural.order</tt> is not false, a trie
+ of the first <tt>total.order.partitioner.max.trie.depth</tt> (default 2) + 1 bytes
+ will be built. Otherwise, keys will be located using a binary search of
+ the partition keyset using the {@link org.apache.hadoop.io.RawComparator}
+ defined for this job. The input file must be sorted with the same
+ comparator and contain {@link
+ org.apache.hadoop.mapred.JobConf#getNumReduceTasks} - 1 keys.]]>
+ </doc>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V"/>
+ <param name="numPartitions" type="int"/>
+ </method>
+ <method name="setPartitionFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the path to the SequenceFile storing the sorted partition keyset.
+ It must be the case that for <tt>R</tt> reduces, there are <tt>R-1</tt>
+ keys in the SequenceFile.]]>
+ </doc>
+ </method>
+ <method name="getPartitionFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the path to the SequenceFile storing the sorted partition keyset.
+ @see #setPartitionFile(JobConf,Path)]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_PATH" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Partitioner effecting a total order by reading split points from
+ an externally generated source.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.TotalOrderPartitioner -->
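+ <!-- A sketch of wiring the partitioner into a job (the partition file path is
+ an assumption; see the InputSampler entry above for producing the file):
+
+ JobConf conf = new JobConf();
+ conf.setPartitionerClass(TotalOrderPartitioner.class);
+ TotalOrderPartitioner.setPartitionFile(conf, new Path("/tmp/_partitions"));
+ // Keep the byte-wise trie lookup for BinaryComparable keys (the default).
+ conf.setBoolean("total.order.partitioner.natural.order", true);
+ -->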
+</package>
+<package name="org.apache.hadoop.mapred.lib.aggregate">
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+ <class name="DoubleValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="DoubleValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a double value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="double"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a double value.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getSum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up a sequence of double
+ values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
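+ <!-- A standalone sketch of the aggregator contract described above (values may
+ arrive as doubles or as their string representations):
+
+ DoubleValueSum sum = new DoubleValueSum();
+ sum.addNextValue(1.5);
+ sum.addNextValue("2.5");         // parsed from the string representation
+ double total = sum.getSum();     // 4.0
+ String report = sum.getReport(); // string form, as consumed by a combiner
+ sum.reset();                     // back to 0 for reuse
+ -->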
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <class name="LongValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the maximum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <class name="LongValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the minimum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <class name="LongValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getSum" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
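+ <!-- The long-valued aggregators above (LongValueMax, LongValueMin,
+      LongValueSum) share one calling pattern; a sketch using LongValueSum,
+      assuming only the methods recorded above:
+
+        import org.apache.hadoop.mapred.lib.aggregate.LongValueSum;
+
+        LongValueSum counter = new LongValueSum();
+        counter.addNextValue("40");   // string form of a long
+        counter.addNextValue(2L);     // or a long directly
+        long sum = counter.getSum();  // 42
+ -->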
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <class name="StringValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the largest of
+ a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <class name="StringValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the smallest of
+ a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+ <class name="UniqValueCount" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="UniqValueCount"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UniqValueCount" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructor
+ @param maxNum the limit on the number of unique values to keep.]]>
+ </doc>
+ </constructor>
+ <method name="setMaxItems" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <doc>
+ <![CDATA[Set the limit on the number of unique values
+ @param n the desired limit on the number of unique values
+ @return the new limit on the number of unique values]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the number of unique objects aggregated]]>
+ </doc>
+ </method>
+ <method name="getUniqueItems" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the set of the unique objects]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of the unique objects. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that dedupes a sequence of objects.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
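+ <!-- A sketch of UniqValueCount with a cap on the number of unique values
+      kept, assuming the constructor and methods recorded above:
+
+        import java.util.Set;
+        import org.apache.hadoop.mapred.lib.aggregate.UniqValueCount;
+
+        UniqValueCount uniq = new UniqValueCount(1000);  // keep at most 1000
+        uniq.addNextValue("a");
+        uniq.addNextValue("b");
+        uniq.addNextValue("a");            // duplicate, counted once
+        String report = uniq.getReport();  // the number of unique values: 2
+        Set items = uniq.getUniqueItems(); // {"a", "b"}
+ -->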
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+ <class name="UserDefinedValueAggregatorDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="UserDefinedValueAggregatorDescriptor" type="java.lang.String, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param className the class name of the user defined descriptor class
+ @param job a configuration object used for descriptor configuration]]>
+ </doc>
+ </constructor>
+ <method name="createInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="className" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create an instance of the given class
+ @param className the name of the class
+ @return a dynamically created instance of the given class]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pairs
+ by delegating the invocation to the real object.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a wrapper for a user defined value aggregator descriptor.
+ It serves two functions: one is to create an object of ValueAggregatorDescriptor from the
+ name of a user defined class that may be dynamically loaded. The other is to
+ delegate invocations of the generateKeyValPairs function to the created object.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
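+ <!-- A sketch of wrapping a user defined descriptor by class name, using
+      the constructor recorded above; com.example.MyDescriptor is a
+      hypothetical class implementing ValueAggregatorDescriptor:
+
+        import org.apache.hadoop.mapred.JobConf;
+        import org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor;
+
+        JobConf job = new JobConf();
+        UserDefinedValueAggregatorDescriptor d =
+            new UserDefinedValueAggregatorDescriptor("com.example.MyDescriptor", job);
+        // calls to d.generateKeyValPairs(key, val) are delegated to the
+        // dynamically loaded com.example.MyDescriptor instance
+ -->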
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+ <interface name="ValueAggregator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val the value to be added]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of values as the outputs of the combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface defines the minimal protocol for value aggregators.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
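+ <!-- A minimal implementation sketch of the ValueAggregator protocol
+      above: a hypothetical aggregator that simply counts values:
+
+        import java.util.ArrayList;
+        import org.apache.hadoop.mapred.lib.aggregate.ValueAggregator;
+
+        public class CountAggregator implements ValueAggregator {
+          private long count = 0;
+          public void addNextValue(Object val) { count++; }
+          public void reset() { count = 0; }
+          public String getReport() { return Long.toString(count); }
+          public ArrayList getCombinerOutput() {
+            ArrayList out = new ArrayList();
+            out.add(Long.toString(count));  // one partial count per combiner
+            return out;
+          }
+        }
+ -->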
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+ <class name="ValueAggregatorBaseDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="ValueAggregatorBaseDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="generateEntry" return="java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <param name="id" type="java.lang.String"/>
+ <param name="val" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @param id the aggregation id
+ @param val the val associated with the id to be aggregated
+ @return an Entry whose key is the aggregation id prefixed with
+ the aggregation type.]]>
+ </doc>
+ </method>
+ <method name="generateValueAggregator" return="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @return a value aggregator of the given type.]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate 1 or 2 aggregation-id/value pairs for the given key/value pair.
+ The first id will be of type LONG_VALUE_SUM, with "record_count" as
+ its aggregation id. If the input is a file split,
+ the second id of the same type will be generated too, with the file name
+ as its aggregation id. This achieves the behavior of counting the total number
+ of records in the input data, and the number of records in each input file.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[get the input file name.
+
+ @param job a job configuration object]]>
+ </doc>
+ </method>
+ <field name="UNIQ_VALUE_COUNT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VALUE_HISTOGRAM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputFile" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements the common functionalities of
+ the subclasses of ValueAggregatorDescriptor class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
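+ <!-- A sketch of a custom descriptor built on ValueAggregatorBaseDescriptor,
+      using generateEntry and the LONG_VALUE_SUM/ONE constants recorded
+      above; the word-splitting logic is illustrative:
+
+        import java.util.ArrayList;
+        import java.util.Map.Entry;
+        import org.apache.hadoop.io.Text;
+        import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor;
+
+        public class WordCountDescriptor extends ValueAggregatorBaseDescriptor {
+          public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
+            ArrayList<Entry<Text, Text>> out = new ArrayList<Entry<Text, Text>>();
+            for (String w : val.toString().split("\\s+")) {
+              // one LONG_VALUE_SUM pair per word, summed in the combine/reduce phase
+              out.add(generateEntry(LONG_VALUE_SUM, w, ONE));
+            }
+            return out;
+          }
+        }
+ -->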
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <class name="ValueAggregatorCombiner" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorCombiner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[The combiner does not need any configuration.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Combines values for a given key.
+ @param key the key is expected to be a Text object, whose prefix indicates
+ the type of aggregation to aggregate the values.
+ @param values the values to combine
+ @param output to collect combined values]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic combiner of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+ <interface name="ValueAggregatorDescriptor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pair.
+ This function is usually called by the mapper of an Aggregate based job.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configure the object
+
+ @param job
+ a JobConf object that may contain the information that can be used
+ to configure the object.]]>
+ </doc>
+ </method>
+ <field name="TYPE_SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ONE" type="org.apache.hadoop.io.Text"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This interface defines the contract a value aggregator descriptor must
+ support. Such a descriptor can be configured with a JobConf object. Its main
+ function is to generate a list of aggregation-id/value pairs. An aggregation
+ id encodes an aggregation type which is used to guide the way to aggregate
+ the value in the reduce/combiner phase of an Aggregate based job. The mapper in
+ an Aggregate based map/reduce job may create one or more of
+ ValueAggregatorDescriptor objects at configuration time. For each input
+ key/value pair, the mapper will use those objects to create aggregation
+ id/value pairs.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+ <class name="ValueAggregatorJob" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorJob"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation. Generic hadoop
+ arguments are accepted.
+ @return a JobConf object ready for submission.
+
+ @throws IOException
+ @see GenericOptionsParser]]>
+ </doc>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setAggregatorDescriptors"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[create and run an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the main class for creating a map/reduce job using the Aggregate
+ framework. Aggregate is a specialization of the map/reduce framework for
+ performing various simple aggregations.
+
+ Generally speaking, in order to implement an application using the Map/Reduce
+ model, the developer implements Map and Reduce functions (and possibly a
+ combine function). However, a lot of applications related to counting and
+ statistics computing have very similar characteristics. Aggregate abstracts
+ out the general patterns of these functions and implements those patterns.
+ In particular, the package provides generic mapper/reducer/combiner classes,
+ a set of built-in value aggregators, and a generic utility class that
+ helps users create map/reduce jobs using the generic classes. The built-in
+ aggregators include:
+
+ sum over numeric values
+ count of the number of distinct values
+ histogram of values
+ minimum, maximum, median, average, and standard deviation of numeric values
+
+ The developer using Aggregate need only provide a plugin class
+ conforming to the following interface:
+
+ public interface ValueAggregatorDescriptor {
+ public ArrayList<Entry> generateKeyValPairs(Object key, Object value);
+ public void configure(JobConf job);
+ }
+
+ The package also provides a base class, ValueAggregatorBaseDescriptor,
+ implementing the above interface. The user can extend the base class and
+ implement generateKeyValPairs accordingly.
+
+ The primary work of generateKeyValPairs is to emit one or more key/value
+ pairs based on the input key/value pair. The key in an output key/value pair
+ encodes two pieces of information: the aggregation type and the aggregation id.
+ The value will be aggregated onto the aggregation id according to the
+ aggregation type.
+
+ This class offers a function to generate a map/reduce job using the Aggregate
+ framework. The function takes the following parameters:
+
+ an input directory spec
+ an input format (text or sequence file)
+ an output directory
+ a file specifying the user plugin class]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
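+ <!-- A sketch of creating and running an Aggregate job via the static
+      methods recorded above; WordCountDescriptor is the hypothetical
+      plugin sketched after ValueAggregatorBaseDescriptor, and the
+      positional arguments (input spec, output directory, ...) are shown
+      schematically:
+
+        import org.apache.hadoop.mapred.JobClient;
+        import org.apache.hadoop.mapred.JobConf;
+        import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob;
+
+        Class[] descriptors = new Class[] { WordCountDescriptor.class };
+        String[] args = new String[] { "indir", "outdir", "2" };
+        JobConf job = ValueAggregatorJob.createValueAggregatorJob(args, descriptors);
+        JobClient.runJob(job);
+ -->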
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <class name="ValueAggregatorJobBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K1, V1, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="ValueAggregatorJobBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="logSpec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="aggregatorDescriptorList" type="java.util.ArrayList&lt;org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This abstract class implements some common functionalities of
+ the generic mapper, reducer and combiner classes of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <class name="ValueAggregatorMapper" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[the map function. It iterates through the value aggregator descriptor
+ list to generate aggregation id/value pairs and emit them.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="org.apache.hadoop.io.Text"/>
+ <param name="arg1" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic mapper of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <class name="ValueAggregatorReducer" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param key
+ the key is expected to be a Text object, whose prefix indicates
+ the type of aggregation to aggregate the values. In effect, data
+ driven computing is achieved. It is assumed that each aggregator's
+ getReport method emits appropriate output for the aggregator. This
+ may be further customized.
+ @param values the values to be aggregated]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic reducer of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+ <class name="ValueHistogram" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="ValueHistogram"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add the given val to the aggregator.
+
+ @param val the value to be added. It is expected to be a string
+ in the form of xxxx\tnum, meaning xxxx has num occurrences.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this aggregator.
+ It includes the following basic statistics of the histogram:
+ the number of unique values
+ the minimum value
+ the median value
+ the maximum value
+ the average value
+ the standard deviation]]>
+ </doc>
+ </method>
+ <method name="getReportDetails" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a string representation of the list of value/frequency pairs of
+ the histogram]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a list of value/frequency pairs.
+ The return value is expected to be used by the reducer.]]>
+ </doc>
+ </method>
+ <method name="getReportItems" return="java.util.TreeMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a TreeMap representation of the histogram]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that computes the
+ histogram of a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
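+ <!-- A sketch of feeding ValueHistogram, assuming the "xxxx\tnum" input
+      convention documented above:
+
+        import org.apache.hadoop.mapred.lib.aggregate.ValueHistogram;
+
+        ValueHistogram hist = new ValueHistogram();
+        hist.addNextValue("cat\t3");   // "cat" occurred 3 times
+        hist.addNextValue("dog\t1");
+        String stats = hist.getReport();        // unique count, min, median,
+                                                // max, average, std deviation
+        String pairs = hist.getReportDetails(); // per-value frequencies
+ -->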
+</package>
+<package name="org.apache.hadoop.mapred.lib.db">
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBConfiguration -->
+ <class name="DBConfiguration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="configureDB"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="driverClass" type="java.lang.String"/>
+ <param name="dbUrl" type="java.lang.String"/>
+ <param name="userName" type="java.lang.String"/>
+ <param name="passwd" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the DB access related fields in the JobConf.
+ @param job the job
+ @param driverClass JDBC Driver class name
+ @param dbUrl JDBC DB access URL.
+ @param userName DB access username
+ @param passwd DB access password]]>
+ </doc>
+ </method>
+ <method name="configureDB"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="driverClass" type="java.lang.String"/>
+ <param name="dbUrl" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the DB access related fields in the JobConf.
+ @param job the job
+ @param driverClass JDBC Driver class name
+ @param dbUrl JDBC DB access URL.]]>
+ </doc>
+ </method>
+ <field name="DRIVER_CLASS_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The JDBC Driver class name]]>
+ </doc>
+ </field>
+ <field name="URL_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[JDBC Database access URL]]>
+ </doc>
+ </field>
+ <field name="USERNAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[User name to access the database]]>
+ </doc>
+ </field>
+ <field name="PASSWORD_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Password to access the database]]>
+ </doc>
+ </field>
+ <field name="INPUT_TABLE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Input table name]]>
+ </doc>
+ </field>
+ <field name="INPUT_FIELD_NAMES_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Field names in the Input table]]>
+ </doc>
+ </field>
+ <field name="INPUT_CONDITIONS_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[WHERE clause in the input SELECT statement]]>
+ </doc>
+ </field>
+ <field name="INPUT_ORDER_BY_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[ORDER BY clause in the input SELECT statement]]>
+ </doc>
+ </field>
+ <field name="INPUT_QUERY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Whole input query, excluding LIMIT...OFFSET]]>
+ </doc>
+ </field>
+ <field name="INPUT_COUNT_QUERY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Input query to get the count of records]]>
+ </doc>
+ </field>
+ <field name="INPUT_CLASS_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Class name implementing DBWritable which will hold input tuples]]>
+ </doc>
+ </field>
+ <field name="OUTPUT_TABLE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Output table name]]>
+ </doc>
+ </field>
+ <field name="OUTPUT_FIELD_NAMES_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Field names in the Output table]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A container for configuration property names for jobs with DB input/output.
+ <br>
+ The job can be configured using the static methods in this class,
+ {@link DBInputFormat}, and {@link DBOutputFormat}.
+ <p>
+ Alternatively, the properties can be set in the configuration with proper
+ values.
+
+ @see DBConfiguration#configureDB(JobConf, String, String, String, String)
+ @see DBInputFormat#setInput(JobConf, Class, String, String)
+ @see DBInputFormat#setInput(JobConf, Class, String, String, String, String...)
+ @see DBOutputFormat#setOutput(JobConf, String, String...)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBConfiguration -->
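+ <!-- A sketch of configuring DB access with the static method recorded
+      above; the driver class, URL, and credentials are illustrative:
+
+        import org.apache.hadoop.mapred.JobConf;
+        import org.apache.hadoop.mapred.lib.db.DBConfiguration;
+
+        JobConf job = new JobConf();
+        DBConfiguration.configureDB(job,
+            "com.mysql.jdbc.Driver",        // JDBC driver class name
+            "jdbc:mysql://localhost/mydb",  // JDBC access URL
+            "user", "secret");              // DB username and password
+ -->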
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat -->
+ <class name="DBInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;org.apache.hadoop.io.LongWritable, T&gt;"/>
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="DBInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="chunks" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getCountQuery" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the query for getting the total number of rows,
+ subclasses can override this for custom behaviour.]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.lib.db.DBWritable&gt;"/>
+ <param name="tableName" type="java.lang.String"/>
+ <param name="conditions" type="java.lang.String"/>
+ <param name="orderBy" type="java.lang.String"/>
+ <param name="fieldNames" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Initializes the map-part of the job with the appropriate input settings.
+
+ @param job The job
+ @param inputClass the class object implementing DBWritable, which is the
+ Java object holding tuple fields.
+ @param tableName The table to read data from
+ @param conditions The conditions with which to select data, e.g. '(updated >
+ 20070101 AND length > 0)'
+ @param orderBy the fieldNames in the orderBy clause.
+ @param fieldNames The field names in the table
+ @see #setInput(JobConf, Class, String, String)]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.lib.db.DBWritable&gt;"/>
+ <param name="inputQuery" type="java.lang.String"/>
+ <param name="inputCountQuery" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Initializes the map-part of the job with the appropriate input settings.
+
+ @param job The job
+ @param inputClass the class object implementing DBWritable, which is the
+ Java object holding tuple fields.
+ @param inputQuery the input query to select fields. Example :
+ "SELECT f1, f2, f3 FROM Mytable ORDER BY f1"
+ @param inputCountQuery the input query that returns the number of records in
+ the table.
+ Example : "SELECT COUNT(f1) FROM Mytable"
+ @see #setInput(JobConf, Class, String, String, String, String...)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat that reads input data from an SQL table.
+ <p>
+ DBInputFormat emits LongWritables containing the record number as
+ key and DBWritables as value.
+
+ The SQL query and input class can be specified using one of the two
+ setInput methods.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat -->
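+ <!-- A sketch of wiring up DBInputFormat with the table-based setInput
+      overload recorded above; MyRecord is a hypothetical class
+      implementing DBWritable that holds one row:
+
+        import org.apache.hadoop.mapred.JobConf;
+        import org.apache.hadoop.mapred.lib.db.DBInputFormat;
+
+        JobConf job = new JobConf();
+        job.setInputFormat(DBInputFormat.class);
+        DBInputFormat.setInput(job, MyRecord.class,
+            "employees",                         // table to read from
+            "salary > 0",                        // WHERE conditions
+            "name",                              // ORDER BY field
+            new String[] { "name", "salary" });  // fields to select
+ -->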
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBInputSplit -->
+ <class name="DBInputFormat.DBInputSplit" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="DBInputFormat.DBInputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DBInputFormat.DBInputSplit" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convenience Constructor
+ @param start the index of the first row to select
+ @param end the index of the last row to select]]>
+ </doc>
+ </constructor>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getStart" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The index of the first row to select]]>
+ </doc>
+ </method>
+ <method name="getEnd" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The index of the last row to select]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return The total row count in this split]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="output" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputSplit that spans a set of rows.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBInputSplit -->
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBRecordReader -->
+ <class name="DBInputFormat.DBRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, T&gt;"/>
+ <constructor name="DBInputFormat.DBRecordReader" type="org.apache.hadoop.mapred.lib.db.DBInputFormat.DBInputSplit, java.lang.Class&lt;T&gt;, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ <doc>
+ <![CDATA[@param split The InputSplit to read data for
+ @throws SQLException]]>
+ </doc>
+ </constructor>
+ <method name="getSelectQuery" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the query for selecting the records;
+ subclasses can override this for custom behaviour.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.LongWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createValue" return="T extends org.apache.hadoop.mapred.lib.db.DBWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.LongWritable"/>
+ <param name="value" type="T extends org.apache.hadoop.mapred.lib.db.DBWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A RecordReader that reads records from an SQL table.
+ Emits LongWritables containing the record number as
+ key and DBWritables as value.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBRecordReader -->
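+ <!-- Example (a hedged sketch; the class names are hypothetical): overriding
+      getSelectQuery() for custom behaviour. DBRecordReader is a protected
+      inner class, so the override lives inside a DBInputFormat subclass.
+
+      public class MyInputFormat extends DBInputFormat<MyWritable> {
+        protected class MyRecordReader extends DBRecordReader {
+          protected MyRecordReader(DBInputSplit split,
+              Class<MyWritable> inputClass, JobConf conf)
+              throws SQLException {
+            super(split, inputClass, conf);
+          }
+          @Override
+          protected String getSelectQuery() {
+            // illustrative vendor-specific query
+            return "SELECT counter, timestamp FROM MyTable ORDER BY counter";
+          }
+        }
+      }
+ -->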
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat.NullDBWritable -->
+ <class name="DBInputFormat.NullDBWritable" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.db.DBWritable"/>
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="DBInputFormat.NullDBWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="java.sql.ResultSet"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="java.sql.PreparedStatement"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ </method>
+ <doc>
+ <![CDATA[A class that implements DBWritable but does nothing]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat.NullDBWritable -->
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBOutputFormat -->
+ <class name="DBOutputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="DBOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="constructQuery" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="table" type="java.lang.String"/>
+ <param name="fieldNames" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Constructs the query used as the prepared statement to insert data.]]>
+ </doc>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filesystem" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filesystem" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="tableName" type="java.lang.String"/>
+ <param name="fieldNames" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Initializes the reduce-part of the job with the appropriate output settings
+
+ @param job The job
+ @param tableName The table to insert data into
+ @param fieldNames The field names in the table]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An OutputFormat that sends the reduce output to an SQL table.
+ <p>
+ {@link DBOutputFormat} accepts &lt;key,value&gt; pairs, where
+ key has a type extending DBWritable. Returned {@link RecordWriter}
+ writes <b>only the key</b> to the database with a batch SQL query.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBOutputFormat -->
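+ <!-- Example (a minimal sketch; the driver, URL, table and column names are
+      placeholders): sending reduce output to a table. Only the key, which
+      must implement DBWritable, is written; the value is ignored.
+
+      JobConf job = new JobConf(MyJob.class);
+      job.setOutputFormat(DBOutputFormat.class);
+      DBConfiguration.configureDB(job,
+          "com.mysql.jdbc.Driver", "jdbc:mysql://localhost/mydb");
+      // produces roughly: INSERT INTO MyTable (counter, timestamp) VALUES (?, ?)
+      DBOutputFormat.setOutput(job, "MyTable", "counter", "timestamp");
+      job.setOutputKeyClass(MyWritable.class);     // written to the table
+      job.setOutputValueClass(NullWritable.class); // ignored
+ -->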
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBOutputFormat.DBRecordWriter -->
+ <class name="DBOutputFormat.DBRecordWriter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"/>
+ <constructor name="DBOutputFormat.DBRecordWriter" type="java.sql.Connection, java.sql.PreparedStatement"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.mapred.lib.db.DBWritable"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A RecordWriter that writes the reduce output to an SQL table]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBOutputFormat.DBRecordWriter -->
+ <!-- start interface org.apache.hadoop.mapred.lib.db.DBWritable -->
+ <interface name="DBWritable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="statement" type="java.sql.PreparedStatement"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ <doc>
+ <![CDATA[Sets the fields of the object in the {@link PreparedStatement}.
+ @param statement the statement that the fields are put into.
+ @throws SQLException]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="resultSet" type="java.sql.ResultSet"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ <doc>
+ <![CDATA[Reads the fields of the object from the {@link ResultSet}.
+ @param resultSet the {@link ResultSet} to get the fields from.
+ @throws SQLException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Objects that are read from/written to a database should implement
+ <code>DBWritable</code>. DBWritable is similar to {@link Writable}
+ except that the {@link #write(PreparedStatement)} method takes a
+ {@link PreparedStatement}, and {@link #readFields(ResultSet)}
+ takes a {@link ResultSet}.
+ <p>
+ Implementations are responsible for writing the fields of the object
+ to PreparedStatement, and reading the fields of the object from the
+ ResultSet.
+
+ <p>Example:</p>
+ If we have the following table in the database:
+ <pre>
+ CREATE TABLE MyTable (
+ counter INTEGER NOT NULL,
+ timestamp BIGINT NOT NULL
+ );
+ </pre>
+ then we can read/write the tuples from/to the table with:
+ <p><pre>
+ public class MyWritable implements Writable, DBWritable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ //Writable#write() implementation
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ //Writable#readFields() implementation
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public void write(PreparedStatement statement) throws SQLException {
+ statement.setInt(1, counter);
+ statement.setLong(2, timestamp);
+ }
+
+ public void readFields(ResultSet resultSet) throws SQLException {
+ counter = resultSet.getInt(1);
+ timestamp = resultSet.getLong(2);
+ }
+ }
+ </pre></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.db.DBWritable -->
+</package>
+<package name="org.apache.hadoop.mapred.pipes">
+ <!-- start class org.apache.hadoop.mapred.pipes.Submitter -->
+ <class name="Submitter" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="Submitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Submitter" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExecutable" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the URI of the application's executable.
+ @param conf the job's configuration
+ @return the URI where the application's executable is located]]>
+ </doc>
+ </method>
+ <method name="setExecutable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="executable" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the URI for the application's executable. Normally this is an
+ hdfs: location.
+ @param conf the configuration to modify
+ @param executable The URI of the application's executable.]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job is using a Java RecordReader.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordReader" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java RecordReader
+ @param conf the configuration to check
+ @return is it a Java RecordReader?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Mapper is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaMapper" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Mapper.
+ @param conf the configuration to check
+ @return is it a Java Mapper?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaReducer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Reducer is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaReducer" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Reducer.
+ @param conf the configuration to check
+ @return is it a Java Reducer?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job will use a Java RecordWriter.
+ @param conf the configuration to modify
+ @param value the new value to set]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordWriter" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Will the reduce use a Java RecordWriter?
+ @param conf the configuration to check
+ @return true, if the output of the job will be written by Java]]>
+ </doc>
+ </method>
+ <method name="getKeepCommandFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Does the user want to keep the command file for debugging? If this is
+ true, pipes will write a copy of the command data to a file in the
+ task directory named "downlink.data", which may be used to run the C++
+ program under the debugger. You probably also want to set
+ JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
+ being deleted.
+ To run using the data file, set the environment variable
+ "hadoop.pipes.command.file" to point to the file.
+ @param conf the configuration to check
+ @return will the framework save the command file?]]>
+ </doc>
+ </method>
+ <method name="setKeepCommandFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether to keep the command file for debugging
+ @param conf the configuration to modify
+ @param keep the new value]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link Submitter#runJob(JobConf)}">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException
+ @deprecated Use {@link Submitter#runJob(JobConf)}]]>
+ </doc>
+ </method>
+ <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="jobSubmit" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the Map-Reduce framework.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param conf the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Submit a pipes job based on the command line arguments.
+ @param args the command-line arguments]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The main entry point and job submitter. It may be used either as a
+ command-line tool or programmatically to launch Pipes jobs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.pipes.Submitter -->
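+ <!-- Example (a hedged sketch of API-based submission; the executable path
+      and input/output settings are illustrative assumptions):
+
+      JobConf conf = new JobConf();
+      // location of the C++ binary, normally on HDFS
+      Submitter.setExecutable(conf, "hdfs://namenode/apps/wordcount");
+      Submitter.setIsJavaRecordReader(conf, true);  // read input in Java
+      Submitter.setIsJavaRecordWriter(conf, true);  // write output in Java
+      FileInputFormat.setInputPaths(conf, new Path("in"));
+      FileOutputFormat.setOutputPath(conf, new Path("out"));
+      RunningJob job = Submitter.runJob(conf);  // submits and waits
+ -->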
+</package>
+<package name="org.apache.hadoop.metrics">
+ <!-- start class org.apache.hadoop.metrics.ContextFactory -->
+ <class name="ContextFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ContextFactory"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of ContextFactory]]>
+ </doc>
+ </constructor>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the named attribute, or null if there is no
+ attribute of that name.
+
+ @param attributeName the attribute name
+ @return the attribute value]]>
+ </doc>
+ </method>
+ <method name="getAttributeNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all the factory's attributes.
+
+ @return the attribute names]]>
+ </doc>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Sets the named factory attribute to the specified value, creating it
+ if it did not already exist. If the value is null, this is the same as
+ calling removeAttribute.
+
+ @param attributeName the attribute name
+ @param value the new attribute value]]>
+ </doc>
+ </method>
+ <method name="removeAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes the named attribute if it exists.
+
+ @param attributeName the attribute name]]>
+ </doc>
+ </method>
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <exception name="InstantiationException" type="java.lang.InstantiationException"/>
+ <exception name="IllegalAccessException" type="java.lang.IllegalAccessException"/>
+ <doc>
+ <![CDATA[Returns the named MetricsContext instance, constructing it if necessary
+ using the factory's current configuration attributes. <p/>
+
+ When constructing the instance, if the factory property
+ <code><i>contextName</i>.class</code> exists,
+ its value is taken to be the name of the class to instantiate. Otherwise,
+ the default is to create an instance of
+ <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a
+ dummy "no-op" context which will cause all metric data to be discarded.
+
+ @param contextName the name of the context
+ @return the named MetricsContext]]>
+ </doc>
+ </method>
+ <method name="getNullContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a "null" context - one which does nothing.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the singleton ContextFactory instance, constructing it if
+ necessary. <p/>
+
+ When the instance is constructed, this method checks if the file
+ <code>hadoop-metrics.properties</code> exists on the class path. If it
+ exists, it must be in the format defined by java.util.Properties, and all
+ the properties in the file are set as attributes on the newly created
+ ContextFactory instance.
+
+ @return the singleton ContextFactory instance]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factory class for creating MetricsContext objects. To obtain an instance
+ of this class, use the static <code>getFactory()</code> method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ContextFactory -->
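+ <!-- Example (a minimal sketch; the "mapred" context name is illustrative
+      and checked exceptions are surfaced via throws):
+
+      static MetricsContext lookupContext() throws Exception {
+        // singleton, initialized from hadoop-metrics.properties if present
+        ContextFactory factory = ContextFactory.getFactory();
+        Object impl = factory.getAttribute("mapred.class");  // may be null
+        // a NullContext is returned when no class attribute is configured
+        return factory.getContext("mapred");
+      }
+ -->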
+ <!-- start interface org.apache.hadoop.metrics.MetricsContext -->
+ <interface name="MetricsContext" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.
+
+ @return the context name]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, the emitting of metrics records as they are
+ updated.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free any data that the implementation
+ may have buffered for sending at the next timer event. It
+ is OK to call <code>startMonitoring()</code> again after calling
+ this.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and also frees any buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new MetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at regular time intervals, as
+ determined by the implementation-class specific configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records and then return]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_PERIOD" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default period in seconds at which data is sent to the metrics system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The main interface to the metrics package.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsContext -->
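+ <!-- Example (a minimal sketch, assuming a context named "mapred" is
+      configured in hadoop-metrics.properties; IOException handling omitted):
+
+      MetricsContext ctx = MetricsUtil.getContext("mapred");
+      if (!ctx.isMonitoring()) {
+        ctx.startMonitoring();  // begin periodic emission of records
+      }
+ -->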
+ <!-- start class org.apache.hadoop.metrics.MetricsException -->
+ <class name="MetricsException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException
+
+ @param message an error message]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[General-purpose, unchecked metrics exception.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsException -->
+ <!-- start interface org.apache.hadoop.metrics.MetricsRecord -->
+ <interface name="MetricsRecord" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value. The tagValue may be null,
+ which is treated the same as an empty String.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.
+
+ @param tagName name of a tag]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes, from the buffered data table, all rows having tags
+ that equal the tags that have been set on this record. For example,
+ if there are no tags on this record, all rows for this record name
+ would be removed. Or, if there is a single tag on this record, then
+ just rows containing a tag with the same name and value would be removed.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A named and optionally tagged set of records to be sent to the metrics
+ system. <p/>
+
+ A record name identifies the kind of data to be reported. For example, a
+ program reporting statistics relating to the disks on a computer might use
+ a record name "diskStats".<p/>
+
+ A record has zero or more <i>tags</i>. A tag has a name and a value. To
+ continue the example, the "diskStats" record might use a tag named
+ "diskName" to identify a particular disk. Sometimes it is useful to have
+ more than one tag, so there might also be a "diskType" with value "ide" or
+ "scsi" or whatever.<p/>
+
+ A record also has zero or more <i>metrics</i>. These are the named
+ values that are to be reported to the metrics system. In the "diskStats"
+ example, possible metric names would be "diskPercentFull", "diskPercentBusy",
+ "kbReadPerSecond", etc.<p/>
+
+ The general procedure for using a MetricsRecord is to fill in its tag and
+ metric values, and then call <code>update()</code> to pass the record to the
+ client library.
+ Metric data is not immediately sent to the metrics system
+ each time that <code>update()</code> is called.
+ An internal table is maintained, identified by the record name. This
+ table has columns
+ corresponding to the tag and the metric names, and rows
+ corresponding to each unique set of tag values. An update
+ either modifies an existing row in the table, or adds a new row with a set of
+ tag values that are different from all the other rows. Note that if there
+ are no tags, then there can be at most one row in the table. <p/>
+
+ Once a row is added to the table, its data will be sent to the metrics system
+ on every timer period, whether or not it has been updated since the previous
+ timer period. If this is inappropriate, for example if metrics were being
+ reported by some transient object in an application, the <code>remove()</code>
+ method can be used to remove the row and thus stop the data from being
+ sent.<p/>
+
+ Note that the <code>update()</code> method is atomic. This means that it is
+ safe for different threads to be updating the same metric. More precisely,
+ it is OK for different threads to call <code>update()</code> on MetricsRecord instances
+ with the same set of tag names and tag values. Different threads should
+ <b>not</b> use the same MetricsRecord instance at the same time.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsRecord -->
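+ <!-- Example (a sketch of the fill-tags/set-metrics/update() cycle described
+      above; the context, record, tag and metric names are illustrative):
+
+      MetricsContext ctx = MetricsUtil.getContext("myContext");
+      MetricsRecord rec = MetricsUtil.createRecord(ctx, "diskStats");
+      rec.setTag("diskName", "/dev/sda1");   // selects the row
+      rec.setMetric("diskPercentFull", 42);  // value for that row
+      rec.update();  // buffered; sent on the next timer period
+ -->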
+ <!-- start class org.apache.hadoop.metrics.MetricsUtil -->
+ <class name="MetricsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to return the named context.
+ If the desired context cannot be created for any reason, the exception
+ is logged, and a null context is returned.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to create and return a new metrics record instance within the
+ given context. This record is tagged with the host name.
+
+ @param context the context
+ @param recordName name of the record
+ @return newly created metrics record]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility class to simplify creation and reporting of hadoop metrics.
+
+ For examples of usage, see NameNodeMetrics.
+ @see org.apache.hadoop.metrics.MetricsRecord
+ @see org.apache.hadoop.metrics.MetricsContext
+ @see org.apache.hadoop.metrics.ContextFactory]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsUtil -->
+ <!-- start interface org.apache.hadoop.metrics.Updater -->
+ <interface name="Updater" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Timer-based call-back from the metric library.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Call-back interface. See <code>MetricsContext.registerUpdater()</code>.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.Updater -->
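+ <!-- Example (a hedged sketch; the class, record and metric names are
+      hypothetical): an Updater that pushes a counter to the metrics system
+      each period.
+
+      public class RequestMetrics implements Updater {
+        private final MetricsRecord rec;
+        private int requests;
+        public RequestMetrics(MetricsContext ctx) {
+          rec = MetricsUtil.createRecord(ctx, "requests");
+          ctx.registerUpdater(this);  // doUpdates() now runs every period
+        }
+        public synchronized void incr() { requests++; }
+        public synchronized void doUpdates(MetricsContext unused) {
+          rec.incrMetric("count", requests);  // delta since last period
+          requests = 0;
+          rec.update();
+        }
+      }
+ -->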
+</package>
+<package name="org.apache.hadoop.metrics.file">
+ <!-- start class org.apache.hadoop.metrics.file.FileContext -->
+ <class name="FileContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of FileContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="getFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the configured file name, or null.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring by opening, in append mode, the
+ file specified by the <code>fileName</code> attribute, if specified.
+ Otherwise the data will be written to standard output.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring, closing the file.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Emits a metrics record to a file.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Flushes the output writer, forcing updates to disk.]]>
+ </doc>
+ </method>
+ <field name="FILE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="PERIOD_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Metrics context for writing metrics to a file.<p/>
+
+ This class is configured by setting ContextFactory attributes which in turn
+ are usually configured through a properties file. All the attributes are
+ prefixed by the contextName. For example, the properties file might contain:
+ <pre>
+ myContextName.fileName=/tmp/metrics.log
+ myContextName.period=5
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.file.FileContext -->
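+ <!-- Example (a sketch of a hadoop-metrics.properties stanza selecting this
+      context; the context name and values are illustrative):
+
+      myContextName.class=org.apache.hadoop.metrics.file.FileContext
+      myContextName.fileName=/tmp/metrics.log
+      myContextName.period=5
+ -->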
+</package>
+<package name="org.apache.hadoop.metrics.ganglia">
+ <!-- start class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+ <class name="GangliaContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GangliaContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of GangliaContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Context for sending metrics to Ganglia.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ganglia.GangliaContext -->
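+ <!-- Example (a sketch; the context name, host and period are illustrative
+      assumptions): the analogous hadoop-metrics.properties stanza for
+      Ganglia, where "servers" lists the gmond address(es) to send to:
+
+      dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+      dfs.period=10
+      dfs.servers=localhost:8649
+ -->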
+</package>
+<package name="org.apache.hadoop.metrics.jvm">
+ <!-- start class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <class name="EventCounter" extends="org.apache.log4j.AppenderSkeleton"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="EventCounter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getFatal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getError" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWarn" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfo" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="requiresLayout" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A log4j Appender that simply counts logging events by level:
+ fatal, error, warn and info.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <!-- start class org.apache.hadoop.metrics.jvm.JvmMetrics -->
+ <class name="JvmMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="init" return="org.apache.hadoop.metrics.jvm.JvmMetrics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="processName" type="java.lang.String"/>
+ <param name="sessionId" type="java.lang.String"/>
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[This will be called periodically (with the period being configuration
+ dependent).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Singleton class which reports Java Virtual Machine metrics to the metrics API.
+ Any application can create an instance of this class in order to emit
+ Java VM metrics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.JvmMetrics -->
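+ <!-- Example (a minimal sketch; the process and session names are
+      illustrative):
+
+      JvmMetrics jvm = JvmMetrics.init("MyDaemon", "session-1");
+      // doUpdates() is then driven by the metrics timer; nothing further
+      // is required to emit JVM metrics each period.
+ -->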
+</package>
+<package name="org.apache.hadoop.metrics.spi">
+ <!-- start class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
+ <class name="AbstractMetricsContext" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsContext"/>
+ <constructor name="AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of AbstractMetricsContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ <doc>
+ <![CDATA[Initializes the context.]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for subclasses to access factory attributes.]]>
+ </doc>
+ </method>
+ <method name="getAttributeTable" return="java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="tableName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an attribute-value map derived from the factory attributes
+ by finding all factory attributes that begin with
+ <i>contextName</i>.<i>tableName</i>. The returned map consists of
+ those attributes with the contextName and tableName stripped off.]]>
+ </doc>
+ </method>
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.]]>
+ </doc>
+ </method>
+ <method name="getContextFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the factory by which this context was created.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, the emitting of metrics records.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free buffered data.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and frees buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new MetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="newRecord" return="org.apache.hadoop.metrics.spi.MetricsRecordImpl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Subclasses should override this if they subclass MetricsRecordImpl.
+ @param recordName the name of the record
+ @return newly created instance of MetricsRecordImpl or subclass]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at time intervals determined by
+ the configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sends a record to the metrics system.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called each period after all records have been emitted, this method does nothing.
+ Subclasses may override it in order to perform some kind of flush.]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.update(). Creates or updates a row in
+ the internal table of metric data.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.remove(). Removes all matching rows in
+ the internal table of metric data. A row matches if it has the same
+ tag names and values as record, but it may also have additional
+ tags.]]>
+ </doc>
+ </method>
+ <method name="getPeriod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the timer period.]]>
+ </doc>
+ </method>
+ <method name="setPeriod"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="period" type="int"/>
+ <doc>
+ <![CDATA[Sets the timer period]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The main class of the Service Provider Interface. This class should be
+ extended in order to integrate the Metrics API with a specific metrics
+ client library. <p/>
+
+ This class implements the internal table of metric data, and the timer
+ on which data is to be sent to the metrics system. Subclasses must
+ override the abstract <code>emitRecord</code> method in order to transmit
+ the data. <p/>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
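+ <!--
+ A minimal sketch (not from the Hadoop sources) of how this SPI is meant to
+ be extended: subclass AbstractMetricsContext and implement the abstract
+ emitRecord method. The StdoutContext class name and the 10-second period
+ are made-up illustrations; a real context would read its period from a
+ factory attribute via getAttribute.
+
+ import java.io.IOException;
+ import org.apache.hadoop.metrics.ContextFactory;
+ import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
+ import org.apache.hadoop.metrics.spi.OutputRecord;
+
+ public class StdoutContext extends AbstractMetricsContext {
+   public StdoutContext() {
+     super();  // the protected SPI constructor
+   }
+
+   public void init(String contextName, ContextFactory factory) {
+     super.init(contextName, factory);
+     setPeriod(10);  // emit every 10 seconds (illustrative value)
+   }
+
+   // The one abstract method: transmit a single record to the metrics system.
+   protected void emitRecord(String contextName, String recordName,
+                             OutputRecord outRec) throws IOException {
+     System.out.println(contextName + "." + recordName + ": " + outRec.getMetricNames());
+   }
+ }
+ -->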
+ <!-- start class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
+ <class name="MetricsRecordImpl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsRecord"/>
+ <constructor name="MetricsRecordImpl" type="java.lang.String, org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsRecordImpl]]>
+ </doc>
+ </constructor>
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes the row, if it exists, in the buffered data table having tags
+ that equal the tags that have been set on this record.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of MetricsRecord. Keeps a back-pointer to the context
+ from which it was created, and delegates back to it on <code>update</code>
+ and <code>remove()</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
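+ <!--
+ A usage sketch (assumed, not from the Hadoop sources) of the MetricsRecord
+ API that MetricsRecordImpl implements. It assumes MetricsUtil.getContext
+ from the same metrics package is available; the "mycontext", "myrecord"
+ and "node17" names are made up.
+
+ import org.apache.hadoop.metrics.MetricsContext;
+ import org.apache.hadoop.metrics.MetricsRecord;
+ import org.apache.hadoop.metrics.MetricsUtil;
+
+ public class RecordUsage {
+   public static void main(String[] args) {
+     MetricsContext context = MetricsUtil.getContext("mycontext");
+     MetricsRecord record = context.createRecord("myrecord");
+     record.setTag("host", "node17");   // tags identify the row
+     record.setMetric("requests", 42);  // absolute value
+     record.incrMetric("errors", 1);    // incremental value
+     record.update();                   // buffer the row for the next emit
+   }
+ }
+ -->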
+ <!-- start class org.apache.hadoop.metrics.spi.MetricValue -->
+ <class name="MetricValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricValue" type="java.lang.Number, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricValue]]>
+ </doc>
+ </constructor>
+ <method name="isIncrement" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumber" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="ABSOLUTE" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCREMENT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Number that is either an absolute or an incremental amount.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricValue -->
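+ <!--
+ A small sketch (not from the Hadoop sources) of the two kinds of
+ MetricValue; autoboxing supplies the java.lang.Number argument.
+
+ import org.apache.hadoop.metrics.spi.MetricValue;
+
+ public class MetricValueExample {
+   public static void main(String[] args) {
+     MetricValue absolute = new MetricValue(42, MetricValue.ABSOLUTE);
+     MetricValue delta = new MetricValue(1, MetricValue.INCREMENT);
+     System.out.println(absolute.isAbsolute());  // true
+     System.out.println(delta.isIncrement());    // true
+   }
+ }
+ -->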
+ <!-- start class org.apache.hadoop.metrics.spi.NullContext -->
+ <class name="NullContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContext]]>
+ </doc>
+ </constructor>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do-nothing version of startMonitoring]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Null metrics context: a metrics context which does nothing. Used as the
+ default context, so that no performance data is emitted if no configuration
+ data is found.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContext -->
+ <!-- start class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <class name="NullContextWithUpdateThread" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContextWithUpdateThread"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContextWithUpdateThread]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A null context which has a thread that periodically calls the
+ registered updaters when monitoring is started. This keeps the data
+ sampled correctly.
+ In all other respects, this is like the null context: no data is emitted.
+ This is suitable for monitoring systems such as JMX, which read the
+ metrics only when someone requests the data through JMX.
+
+ The default implementations of start and stop monitoring inherited from
+ AbstractMetricsContext are good enough.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <!-- start class org.apache.hadoop.metrics.spi.OutputRecord -->
+ <class name="OutputRecord" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTagNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of tag names]]>
+ </doc>
+ </method>
+ <method name="getTag" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a tag object, which can be a String, Integer, Short or Byte.
+
+ @return the tag value, or null if there is no such tag]]>
+ </doc>
+ </method>
+ <method name="getMetricNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of metric names.]]>
+ </doc>
+ </method>
+ <method name="getMetric" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the metric object which can be a Float, Integer, Short or Byte.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents a record of metric data to be sent to a metrics system.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.OutputRecord -->
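+ <!--
+ A sketch (not from the Hadoop sources) of how a context's emitRecord
+ implementation might walk an OutputRecord. The OutputRecordFormatter
+ helper is hypothetical.
+
+ import org.apache.hadoop.metrics.spi.OutputRecord;
+
+ public class OutputRecordFormatter {
+   // Render an OutputRecord as one line of key=value pairs.
+   public static String format(String contextName, String recordName,
+                               OutputRecord rec) {
+     StringBuilder sb = new StringBuilder(contextName).append('.').append(recordName);
+     for (String tag : rec.getTagNames()) {
+       sb.append(' ').append(tag).append('=').append(rec.getTag(tag));
+     }
+     for (String metric : rec.getMetricNames()) {
+       sb.append(' ').append(metric).append('=').append(rec.getMetric(metric));
+     }
+     return sb.toString();
+   }
+ }
+ -->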
+ <!-- start class org.apache.hadoop.metrics.spi.Util -->
+ <class name="Util" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="parse" return="java.util.List&lt;java.net.InetSocketAddress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="specs" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Parses a space and/or comma separated sequence of server specifications
+ of the form <i>hostname</i> or <i>hostname:port</i>. If
+ the specs string is null, defaults to localhost:defaultPort.
+
+ @return a list of InetSocketAddress objects.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Static utility methods]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.Util -->
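+ <!--
+ A usage sketch (not from the Hadoop sources) of Util.parse. The hostnames
+ and the 8649 default port are made up.
+
+ import java.net.InetSocketAddress;
+ import java.util.List;
+ import org.apache.hadoop.metrics.spi.Util;
+
+ public class ParseExample {
+   public static void main(String[] args) {
+     // "host1" gets the default port; "host2" keeps its explicit one,
+     // yielding host1:8649 and host2:8650.
+     List<InetSocketAddress> servers = Util.parse("host1, host2:8650", 8649);
+     System.out.println(servers);
+   }
+ }
+ -->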
+</package>
+<package name="org.apache.hadoop.metrics.util">
+ <!-- start class org.apache.hadoop.metrics.util.MBeanUtil -->
+ <class name="MBeanUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MBeanUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="registerMBean" return="javax.management.ObjectName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="serviceName" type="java.lang.String"/>
+ <param name="nameName" type="java.lang.String"/>
+ <param name="theMbean" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Register the MBean using our standard MBeanName format
+ "hadoop.dfs:service=<serviceName>,name=<nameName>",
+ where <serviceName> and <nameName> are the supplied parameters.
+
+ @param serviceName
+ @param nameName
+ @param theMbean - the MBean to register
+ @return the name used to register the MBean]]>
+ </doc>
+ </method>
+ <method name="unregisterMBean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mbeanName" type="javax.management.ObjectName"/>
+ </method>
+ <doc>
+ <![CDATA[This util class provides a method to register an MBean using
+ our standard naming convention as described in the doc
+ for {@link #registerMBean(String, String, Object)}]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MBeanUtil -->
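+ <!--
+ A sketch (not from the Hadoop sources) of registering a standard JMX MBean
+ through MBeanUtil. The MyStats class and its interface are made up; the JMX
+ standard-MBean convention requires the interface name to be the class name
+ plus "MBean".
+
+ import javax.management.ObjectName;
+ import org.apache.hadoop.metrics.util.MBeanUtil;
+
+ interface MyStatsMBean {
+   int getRequestCount();
+ }
+
+ class MyStats implements MyStatsMBean {
+   public int getRequestCount() { return 42; }
+ }
+
+ public class MBeanExample {
+   public static void main(String[] args) {
+     // Registered as "hadoop.dfs:service=MyService,name=MyStats".
+     ObjectName name = MBeanUtil.registerMBean("MyService", "MyStats", new MyStats());
+     MBeanUtil.unregisterMBean(name);
+   }
+ }
+ -->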
+ <!-- start class org.apache.hadoop.metrics.util.MetricsIntValue -->
+ <class name="MetricsIntValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsIntValue" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="int"/>
+ <doc>
+ <![CDATA[Set the value
+ @param newValue]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+ <![CDATA[Inc metrics by incr value
+ @param incr - value to be added]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decr" type="int"/>
+ <doc>
+ <![CDATA[Dec metrics by decr value
+ @param decr - value to subtract]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Dec metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the mr.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsIntValue class is for a metric that does not vary with time
+ but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsIntValue -->
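+ <!--
+ A sketch (not from the Hadoop sources) of using MetricsIntValue as a gauge.
+ The QueueMetrics class is made up; MetricsLongValue below is used the same
+ way with long values.
+
+ import org.apache.hadoop.metrics.MetricsRecord;
+ import org.apache.hadoop.metrics.util.MetricsIntValue;
+
+ public class QueueMetrics {
+   private final MetricsIntValue queueSize = new MetricsIntValue("queueSize");
+
+   void onEnqueue() { queueSize.inc(); }
+   void onDequeue() { queueSize.dec(); }
+
+   // Typically called from an Updater callback each period; the value is
+   // pushed only if it changed since the last push.
+   void doUpdates(MetricsRecord record) {
+     queueSize.pushMetric(record);
+   }
+ }
+ -->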
+ <!-- start class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <class name="MetricsLongValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsLongValue" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="long"/>
+ <doc>
+ <![CDATA[Set the value
+ @param newValue]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Inc metrics by incr value
+ @param incr - value to be added]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decr" type="long"/>
+ <doc>
+ <![CDATA[Dec metrics by decr value
+ @param decr - value to subtract]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Dec metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the mr.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsLongValue class is for a metric that does not vary with time
+ but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
+ <class name="MetricsTimeVaryingInt" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingInt" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+ <![CDATA[Inc metrics by incr value
+ @param incr - number of operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalValue()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value at the previous interval.
+ @return prev interval value]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingInt class is for a metric that naturally
+ varies over time (e.g. number of files created).
+ The metric is published at the interval heart beat (the interval
+ is set in the metrics config file).
+ Note: if one wants a time associated with the metric then use
+ @see org.apache.hadoop.metrics.util.MetricsTimeVaryingRate]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
+ <class name="MetricsTimeVaryingRate" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingRate" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param n the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numOps" type="int"/>
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for numOps operations
+ @param numOps - number of operations
+ @param time - time for numOps operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for one operation
+ @param time for one operation]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and
+ {@link #getPreviousIntervalNumOps()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalNumOps" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of operations in the previous interval
+ @return ops in the previous interval]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalAverageTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The average time of an operation in the previous interval
+ @return the average time per operation.]]>
+ </doc>
+ </method>
+ <method name="getMinTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The min time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return min time for an operation]]>
+ </doc>
+ </method>
+ <method name="getMaxTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The max time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return max time for an operation]]>
+ </doc>
+ </method>
+ <method name="resetMinMax"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the min max values]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingRate class is for a rate based metric that
+ naturally varies over time (e.g. time taken to create a file).
+ The rate is averaged at each interval heart beat (the interval
+ is set in the metrics config file).
+ This class also keeps track of the min and max rates along with
+ a method to reset the min-max.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
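+ <!--
+ A sketch (not from the Hadoop sources) of timing an operation with
+ MetricsTimeVaryingRate. The FileOpMetrics class is made up;
+ MetricsTimeVaryingInt above follows the same inc/pushMetric pattern for
+ plain counters.
+
+ import org.apache.hadoop.metrics.MetricsRecord;
+ import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+
+ public class FileOpMetrics {
+   private final MetricsTimeVaryingRate createTime =
+       new MetricsTimeVaryingRate("createFile");
+
+   void timedCreate() {
+     long start = System.currentTimeMillis();
+     // ... perform the operation ...
+     createTime.inc(System.currentTimeMillis() - start);  // one op, its time
+   }
+
+   // Called each period from an Updater: pushes the delta since the last push.
+   void doUpdates(MetricsRecord record) {
+     createTime.pushMetric(record);
+   }
+ }
+ -->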
+</package>
+<package name="org.apache.hadoop.net">
+ <!-- start class org.apache.hadoop.net.CachedDNSToSwitchMapping -->
+ <class name="CachedDNSToSwitchMapping" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.DNSToSwitchMapping"/>
+ <constructor name="CachedDNSToSwitchMapping" type="org.apache.hadoop.net.DNSToSwitchMapping"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ </method>
+ <field name="rawMapping" type="org.apache.hadoop.net.DNSToSwitchMapping"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A cached implementation of DNSToSwitchMapping that takes a
+ raw DNSToSwitchMapping and stores the resolved network location in
+ a cache. Subsequent calls for a resolved network location
+ will get the location from the cache.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.CachedDNSToSwitchMapping -->
+ <!-- start class org.apache.hadoop.net.DNS -->
+ <class name="DNS" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DNS"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reverseDns" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hostIp" type="java.net.InetAddress"/>
+ <param name="ns" type="java.lang.String"/>
+ <exception name="NamingException" type="javax.naming.NamingException"/>
+ <doc>
+ <![CDATA[Returns the hostname associated with the specified IP address by the
+ provided nameserver.
+
+ @param hostIp
+ The address to reverse lookup
+ @param ns
+ The host name of a reachable DNS server
+ @return The host name associated with the provided IP
+ @throws NamingException
+ If a NamingException is encountered]]>
+ </doc>
+ </method>
+ <method name="getIPs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the IPs associated with the provided interface, if any, in
+ textual form.
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return A string vector of all the IPs associated with the provided
+ interface
+ @throws UnknownHostException
+ If an UnknownHostException is encountered in querying the
+ default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultIP" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the first available IP address associated with the provided
+ network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The IP address in text form
+ @throws UnknownHostException
+ If one is encountered in querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the provided nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return A string vector of all host names associated with the IPs tied to
+ the specified interface
+ @throws UnknownHostException]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the default nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The list of host names associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the provided
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the default
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides direct and reverse lookup functionalities, allowing
+ the querying of specific network interfaces or nameservers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.DNS -->
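+ <!--
+ A usage sketch (not from the Hadoop sources) of the DNS lookup helpers.
+ The "eth0" interface name follows the examples in the doc above.
+
+ import java.net.UnknownHostException;
+ import org.apache.hadoop.net.DNS;
+
+ public class DnsExample {
+   public static void main(String[] args) throws UnknownHostException {
+     String ip = DNS.getDefaultIP("eth0");      // first IP on the interface
+     String host = DNS.getDefaultHost("eth0");  // via the default nameserver
+     System.out.println(host + " / " + ip);
+   }
+ }
+ -->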
+ <!-- start interface org.apache.hadoop.net.DNSToSwitchMapping -->
+ <interface name="DNSToSwitchMapping" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[Resolves a list of DNS-names/IP-addresses and returns a list of
+ switch information (network paths). One-to-one correspondence must be
+ maintained between the elements in the lists.
+ Consider an element in the argument list - x.y.com. The switch information
+ that is returned must be a network path of the form /foo/rack,
+ where / is the root, and 'foo' is the switch where 'rack' is connected.
+ Note the hostname/ip-address is not part of the returned path.
+ The network topology of the cluster would determine the number of
+ components in the network path.
+ @param names
+ @return list of resolved network paths]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An interface that should be implemented to allow pluggable
+ DNS-name/IP-address to RackID resolvers.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.DNSToSwitchMapping -->
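+ <!--
+ A minimal sketch (not from the Hadoop sources) of a DNSToSwitchMapping
+ implementation that places every host on one default rack. The FlatMapping
+ name is made up; CachedDNSToSwitchMapping above can wrap such a raw mapping
+ to cache its results.
+
+ import java.util.ArrayList;
+ import java.util.List;
+ import org.apache.hadoop.net.DNSToSwitchMapping;
+
+ public class FlatMapping implements DNSToSwitchMapping {
+   public List<String> resolve(List<String> names) {
+     // One output path per input name, in the same order.
+     List<String> paths = new ArrayList<String>(names.size());
+     for (int i = 0; i < names.size(); i++) {
+       paths.add("/default-switch/default-rack");
+     }
+     return paths;
+   }
+ }
+ -->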
+ <!-- start class org.apache.hadoop.net.NetUtils -->
+ <class name="NetUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="clazz" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the socket factory for the given class according to its
+ configuration parameter
+ <tt>hadoop.rpc.socket.factory.class.&lt;ClassName&gt;</tt>. When no
+ such parameter exists then fall back on the default socket factory as
+ configured by <tt>hadoop.rpc.socket.factory.class.default</tt>. If
+ this default socket factory is not configured, then fall back on the JVM
+ default socket factory.
+
+ @param conf the configuration
+ @param clazz the class (usually a {@link VersionedProtocol})
+ @return a socket factory]]>
+ </doc>
+ </method>
+ <method name="getDefaultSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default socket factory as specified by the configuration
+ parameter <tt>hadoop.rpc.socket.factory.class.default</tt>
+
+ @param conf the configuration
+ @return the default socket factory as specified in the configuration or
+ the JVM default socket factory if the configuration does not
+ contain a default socket factory property.]]>
+ </doc>
+ </method>
+ <method name="getSocketFactoryFromProperty" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="propValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the socket factory corresponding to the given proxy URI. If the
+ given proxy URI corresponds to an absence of configuration parameter,
+ returns null. If the URI is malformed, an exception is raised.
+
+ @param propValue the property which is the class name of the
+ SocketFactory to instantiate; assumed non null and non empty.
+ @return a socket factory as defined in the property value.]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="getServerAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="oldBindAddressName" type="java.lang.String"/>
+ <param name="oldPortName" type="java.lang.String"/>
+ <param name="newBindAddressName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Handle the transition from pairs of attributes specifying a host and port
+ to a single colon separated one.
+ @param conf the configuration to check
+ @param oldBindAddressName the old address attribute name
+ @param oldPortName the old port attribute name
+ @param newBindAddressName the new combined name
+ @return the complete address from the configuration]]>
+ </doc>
+ </method>
+ <method name="addStaticResolution"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="resolvedName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a static resolution for host. This can be used to set up
+ fake hostnames that point to a well-known host. For example,
+ in some test cases we need daemons with different hostnames
+ running on the same machine. In order to create connections to these
+ daemons, one can set up mappings from those hostnames to "localhost".
+ {@link NetUtils#getStaticResolution(String)} can be used to query for
+ the actual hostname.
+ @param host
+ @param resolvedName]]>
+ </doc>
+ </method>
+ <method name="getStaticResolution" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Retrieves the resolved name for the passed host. The resolved name must
+ have been set earlier using
+ {@link NetUtils#addStaticResolution(String, String)}
+ @param host
+ @return the resolution]]>
+ </doc>
+ </method>
+ <method name="getAllStaticResolutions" return="java.util.List&lt;java.lang.String[]&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is used to get all the resolutions that were added using
+ {@link NetUtils#addStaticResolution(String, String)}. The return
+ value is a List, each element of which is a String array
+ of the form String[0]=hostname, String[1]=resolved-hostname.
+ @return the list of resolutions]]>
+ </doc>
+ </method>
+ <method name="getConnectAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="server" type="org.apache.hadoop.ipc.Server"/>
+ <doc>
+ <![CDATA[Returns InetSocketAddress that a client can use to
+ connect to the server. Server.getListenerAddress() is not correct when
+ the server binds to "0.0.0.0". This returns "127.0.0.1:port" when
+ the getListenerAddress() returns "0.0.0.0:port".
+
+ @param server
+ @return socket address that a client can use to connect to the server.]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getInputStream(socket, socket.getSoTimeout()).<br><br>
+
+ From documentation for {@link #getInputStream(Socket, long)}:<br>
+ Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see #getInputStream(Socket, long)
+
+ @param socket
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. Zero
+ means wait as long as necessary.
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getOutputStream(socket, 0). Timeout of zero implies write will
+ wait until data is available.<br><br>
+
+ From documentation for {@link #getOutputStream(Socket, long)} : <br>
+ Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ data is available.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getOutputStream()}.
+
+ @see #getOutputStream(Socket, long)
+
+ @param socket
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ data is available.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getOutputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. Zero
+ means wait as long as necessary.
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="normalizeHostName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a string representation of a host, return its IP address
+ in textual representation.
+
+ @param name a string representation of a host:
+ either a textual representation of its IP address or its host name
+ @return its IP address in string format]]>
+ </doc>
+ </method>
+ <method name="normalizeHostNames" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.Collection&lt;java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[Given a collection of string representation of hosts, return a list of
+ corresponding IP addresses in the textual representation.
+
+ @param names a collection of string representations of hosts
+ @return a list of corresponding IP addresses in the string format
+ @see #normalizeHostName(String)]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetUtils -->
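+ <!-- Usage sketch for the stream getters above (illustrative only; the host,
+      port and payload are hypothetical, and error handling is omitted):
+
+        // imports assumed: java.io.*, java.net.InetSocketAddress,
+        //                  java.nio.channels.SocketChannel, org.apache.hadoop.net.NetUtils
+        // A socket with an associated channel, so the timeouts apply per call.
+        Socket socket = SocketChannel.open(
+            new InetSocketAddress("example.com", 9000)).socket();
+        InputStream in   = NetUtils.getInputStream(socket, 30000L);  // 30s read timeout
+        OutputStream out = NetUtils.getOutputStream(socket, 30000L); // 30s write timeout
+        out.write(new byte[] { 1, 2, 3 });
+        int first = in.read(); // throws SocketTimeoutException if no data within 30s
+ -->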
+ <!-- start class org.apache.hadoop.net.NetworkTopology -->
+ <class name="NetworkTopology" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetworkTopology"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Add a leaf node.
+ Update the node counter & rack counter if necessary.
+ @param node
+ node to be added
+ @exception IllegalArgumentException if the node is added under a leaf,
+ or the node to be added is not a leaf]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Remove a node.
+ Update the node counter & rack counter if necessary.
+ @param node
+ node to be removed]]>
+ </doc>
+ </method>
+ <method name="contains" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if the tree contains node <i>node</i>
+
+ @param node
+ a node
+ @return true if <i>node</i> is already in the tree; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="loc" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a string representation of a node, return its reference
+
+ @param loc
+ a path-like string representation of a node
+ @return a reference to the node; null if the node is not in the tree]]>
+ </doc>
+ </method>
+ <method name="getNumOfRacks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of racks]]>
+ </doc>
+ </method>
+ <method name="getNumOfLeaves" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of nodes]]>
+ </doc>
+ </method>
+ <method name="getDistance" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return the distance between two nodes.
+ It is assumed that the distance from one node to its parent is 1.
+ The distance between two nodes is calculated by summing up their distances
+ to their closest common ancestor.
+ @param node1 one node
+ @param node2 another node
+ @return the distance between node1 and node2
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
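+ <!-- Worked example for getDistance (illustrative; the names and locations are
+      hypothetical, following the /datacenter/rack path convention documented
+      for the Node interface below):
+
+        NetworkTopology topo = new NetworkTopology();
+        Node a = new NodeBase("h1:50010", "/dog/orange");
+        Node b = new NodeBase("h2:50010", "/dog/orange");
+        Node c = new NodeBase("h3:50010", "/dog/blue");
+        topo.add(a); topo.add(b); topo.add(c);
+        topo.getDistance(a, a); // 0
+        topo.getDistance(a, b); // 2: closest common ancestor is rack /dog/orange, 1 hop each
+        topo.getDistance(a, c); // 4: closest common ancestor is data center /dog, 2 hops each
+ -->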
+ <method name="isOnSameRack" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if two nodes are on the same rack
+ @param node1 one node
+ @param node2 another node
+ @return true if node1 and node2 are on the same rack; false otherwise
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="chooseRandom" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Randomly choose one node from <i>scope</i>.
+ If <i>scope</i> starts with ~, choose one from all the nodes except for the
+ ones in <i>scope</i>; otherwise, choose one from <i>scope</i>.
+ @param scope range of nodes from which a node will be chosen
+ @return the chosen node]]>
+ </doc>
+ </method>
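+ <!-- Sketch of the scope syntax for chooseRandom, reusing the hypothetical
+      topology from the getDistance example above:
+
+        Node any     = topo.chooseRandom(NodeBase.ROOT);   // any node in the cluster
+        Node onRack  = topo.chooseRandom("/dog/orange");   // some node on rack orange
+        Node offRack = topo.chooseRandom("~/dog/orange");  // any node NOT on rack orange
+ -->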
+ <method name="countNumOfAvailableNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <param name="excludedNodes" type="java.util.List&lt;org.apache.hadoop.net.Node&gt;"/>
+ <doc>
+ <![CDATA[Return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i>.
+ If <i>scope</i> starts with ~, return the number of nodes that are in
+ neither <i>scope</i> nor <i>excludedNodes</i>.
+ @param scope a path string that may start with ~
+ @param excludedNodes a list of nodes
+ @return number of available nodes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert a network tree to a string]]>
+ </doc>
+ </method>
+ <method name="pseudoSortByDistance"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reader" type="org.apache.hadoop.net.Node"/>
+ <param name="nodes" type="org.apache.hadoop.net.Node[]"/>
+ <doc>
+ <![CDATA[Sort the nodes array by their distances to <i>reader</i>.
+ It linearly scans the array; if a local node is found, it is swapped with
+ the first element of the array.
+ If a local-rack node is found, it is swapped with the first element following
+ the local node.
+ If neither a local node nor a local-rack node is found, a random replica
+ location is put at position 0.
+ The rest of the nodes are left untouched.]]>
+ </doc>
+ </method>
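+ <!-- Sketch for pseudoSortByDistance, again reusing the hypothetical example
+      topology; a typical caller passes the replica locations of one block:
+
+        Node[] replicas = { c, b, a };           // e.g. locations reported for a block
+        topo.pseudoSortByDistance(a, replicas);  // the reader is node a
+        // afterwards replicas[0] == a (the local node); the rest are untouched
+ -->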
+ <field name="DEFAULT_RACK" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_HOST_LEVEL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The class represents a cluster of computers with a hierarchical,
+ tree-structured network topology.
+ For example, a cluster may consist of many data centers filled
+ with racks of computers.
+ In a network topology, leaves represent data nodes (computers) and inner
+ nodes represent switches/routers that manage traffic in/out of data centers
+ or racks.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetworkTopology -->
+ <!-- start interface org.apache.hadoop.net.Node -->
+ <interface name="Node" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the string representation of this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the node's network location]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface defines a node in a network topology.
+ A node may be a leaf representing a data node or an inner
+ node representing a datacenter or rack.
+ Each node has a name, and its location in the network is
+ described by a string whose syntax is similar to a file name.
+ For example, a data node's name is hostname:port# and if it's located at
+ rack "orange" in datacenter "dog", the string representation of its
+ network location is /dog/orange]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.Node -->
+ <!-- start class org.apache.hadoop.net.NodeBase -->
+ <class name="NodeBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <constructor name="NodeBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its path
+ @param path
+ a concatenation of this node's location, the path separator, and its name]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String, org.apache.hadoop.net.Node, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location
+ @param parent this node's parent node
+ @param level this node's level in the tree]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set this node's network location]]>
+ </doc>
+ </method>
+ <method name="getPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return the given node's path]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's string representation]]>
+ </doc>
+ </method>
+ <method name="normalize" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Normalize a path]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree]]>
+ </doc>
+ </method>
+ <field name="PATH_SEPARATOR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PATH_SEPARATOR_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ROOT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="location" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="level" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="parent" type="org.apache.hadoop.net.Node"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class that implements interface Node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NodeBase -->
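+ <!-- Sketch of the NodeBase path convention (location + PATH_SEPARATOR + name),
+      with hypothetical values:
+
+        Node n = new NodeBase("/dog/orange/h1:50010"); // split into location and name
+        n.getName();                        // "h1:50010"
+        n.getNetworkLocation();             // "/dog/orange"
+        NodeBase.getPath(n);                // "/dog/orange/h1:50010"
+        NodeBase.normalize("/dog/orange/"); // "/dog/orange" (trailing separator dropped)
+ -->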
+ <!-- start class org.apache.hadoop.net.ScriptBasedMapping -->
+ <class name="ScriptBasedMapping" extends="org.apache.hadoop.net.CachedDNSToSwitchMapping"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ScriptBasedMapping"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ScriptBasedMapping" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[This class implements the {@link DNSToSwitchMapping} interface using a
+ script configured via topology.script.file.name .]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.ScriptBasedMapping -->
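+ <!-- Configuration sketch for ScriptBasedMapping (the script path and hostnames
+      are hypothetical; resolve() is inherited from the DNSToSwitchMapping
+      interface rather than declared above):
+
+        // imports assumed: java.util.*, org.apache.hadoop.conf.Configuration
+        Configuration conf = new Configuration();
+        conf.set("topology.script.file.name", "/etc/hadoop/topology.sh");
+        ScriptBasedMapping mapping = new ScriptBasedMapping(conf);
+        // Maps each host to a rack path, e.g. ["/rack1", "/rack2"]
+        List<String> racks =
+            mapping.resolve(Arrays.asList("h1.example.com", "h2.example.com"));
+ -->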
+ <!-- start class org.apache.hadoop.net.SocketInputStream -->
+ <class name="SocketInputStream" extends="java.io.InputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.ReadableByteChannel"/>
+ <constructor name="SocketInputStream" type="java.nio.channels.ReadableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for reading, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), timeout): <br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), socket.getSoTimeout()):
+ <br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.ReadableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this input stream.
+ This is useful in certain cases like channel for
+ {@link FileChannel#transferFrom(ReadableByteChannel, long, long)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForReadable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for reading.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an input stream that can have a timeout while reading.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} for the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketOutputStream} for writing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketInputStream -->
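+ <!-- Usage sketch for SocketInputStream (the endpoint is hypothetical; assumes
+      java.io, java.net and java.nio.channels imports):
+
+        SocketChannel ch =
+            SocketChannel.open(new InetSocketAddress("example.com", 9000));
+        InputStream in = new SocketInputStream(ch.socket(), 5000); // 5s timeout
+        int b = in.read(); // throws SocketTimeoutException if nothing arrives in 5s
+ -->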
+ <!-- start class org.apache.hadoop.net.SocketOutputStream -->
+ <class name="SocketOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.WritableByteChannel"/>
+ <constructor name="SocketOutputStream" type="java.nio.channels.WritableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for writing, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketOutputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketOutputStream(socket.getChannel(), timeout):<br><br>
+
+ Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketOutputStream#SocketOutputStream(WritableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.WritableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this stream.
+ This is useful in certain cases like channel for
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for writing.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="transferToFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileCh" type="java.nio.channels.FileChannel"/>
+ <param name="position" type="long"/>
+ <param name="count" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Transfers data from FileChannel using
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.
+
+ Similar to readFully(), this waits until the requested amount of
+ data is transferred.
+
+ @param fileCh FileChannel to transfer data from.
+ @param position position within the channel where the transfer begins
+ @param count number of bytes to transfer.
+
+ @throws EOFException
+ If the end of the input file is reached before the requested number of
+ bytes has been transferred.
+
+ @throws SocketTimeoutException
+ If this channel blocks transfer longer than timeout for
+ this stream.
+
+ @throws IOException Includes any exception thrown by
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an output stream that can have a timeout while writing.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} for the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketInputStream} for reading.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketOutputStream -->
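+ <!-- Sketch of a timed zero-copy transfer with transferToFully (the file path
+      and endpoint are hypothetical; assumes java.io, java.net and
+      java.nio.channels imports):
+
+        SocketChannel ch =
+            SocketChannel.open(new InetSocketAddress("example.com", 9000));
+        SocketOutputStream out = new SocketOutputStream(ch.socket(), 10000);
+        FileChannel file = new FileInputStream("/tmp/block").getChannel();
+        out.transferToFully(file, 0L, (int) file.size()); // blocks until fully sent
+ -->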
+ <!-- start class org.apache.hadoop.net.SocksSocketFactory -->
+ <class name="SocksSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="SocksSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <constructor name="SocksSocketFactory" type="java.net.Proxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with a supplied Proxy
+
+ @param proxy the proxy to use to create sockets]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create sockets with a SOCKS proxy]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocksSocketFactory -->
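+ <!-- Sketch, assuming a SOCKS proxy listening on the hypothetical address
+      localhost:1080 (java.net and javax.net imports assumed):
+
+        Proxy proxy =
+            new Proxy(Proxy.Type.SOCKS, new InetSocketAddress("localhost", 1080));
+        SocketFactory factory = new SocksSocketFactory(proxy);
+        Socket s = factory.createSocket("example.com", 9000); // tunneled via the proxy
+ -->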
+ <!-- start class org.apache.hadoop.net.StandardSocketFactory -->
+ <class name="StandardSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StandardSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create standard (direct, non-proxied) sockets]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.StandardSocketFactory -->
+</package>
+<package name="org.apache.hadoop.record">
+ <!-- start class org.apache.hadoop.record.BinaryRecordInput -->
+ <class name="BinaryRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="BinaryRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordInput" type="java.io.DataInput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inp" type="java.io.DataInput"/>
+ <doc>
+ <![CDATA[Get a thread-local record input for the supplied DataInput.
+ @param inp data input stream
+ @return binary record input corresponding to the supplied DataInput.]]>
+ </doc>
+ </method>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordInput -->
+ <!-- start class org.apache.hadoop.record.BinaryRecordOutput -->
+ <class name="BinaryRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="BinaryRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordOutput" type="java.io.DataOutput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <doc>
+ <![CDATA[Get a thread-local record output for the supplied DataOutput.
+ @param out data output stream
+ @return binary record output corresponding to the supplied DataOutput.]]>
+ </doc>
+ </method>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordOutput -->
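+ <!-- Round-trip sketch for the binary record format (the tags such as "id" are
+      arbitrary labels; java.io imports assumed):
+
+        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+        BinaryRecordOutput out = new BinaryRecordOutput(bytes);
+        out.writeInt(42, "id");
+        out.writeString("hello", "msg");
+
+        BinaryRecordInput in =
+            new BinaryRecordInput(new ByteArrayInputStream(bytes.toByteArray()));
+        int id = in.readInt("id");         // 42
+        String msg = in.readString("msg"); // "hello"
+ -->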
+ <!-- start class org.apache.hadoop.record.Buffer -->
+ <class name="Buffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Buffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-count sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte array as the initial value.
+
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[], int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte range as the initial value.
+
+ @param bytes Copy of this array becomes the backing storage for the object.
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Use the specified byte array as the underlying sequence.
+
+ @param bytes byte sequence]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Copy the specified byte array to the Buffer. Replaces the current buffer.
+
+ @param bytes byte array to be assigned
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the Buffer.
+
+ @return The data is only valid between 0 and getCount() - 1.]]>
+ </doc>
+ </method>
+ <method name="getCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current count of the buffer.]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum count that could be handled without
+ resizing the backing storage.
+
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newCapacity" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved if newCapacity >= getCount().
+ @param newCapacity The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the buffer to 0 size]]>
+ </doc>
+ </method>
+ <method name="truncate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Change the capacity of the backing store to be the same as the current
+ count of the buffer.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer.
+
+ @param bytes byte array to be appended
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer
+
+ @param bytes byte array to be appended]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Define the sort order of the Buffer.
+
+ @param other The other buffer
+ @return Positive if this is bigger than other, 0 if they are equal, and
+ negative if this is smaller than other.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="charsetName" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ <doc>
+ <![CDATA[Convert the byte buffer to a string using a specific character encoding.
+
+ @param charsetName a valid Java character set name]]>
+ </doc>
+ </method>
+ <method name="clone" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="CloneNotSupportedException" type="java.lang.CloneNotSupportedException"/>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is used as a Java native type for buffer.
+ It is resizable and distinguishes between the count of the sequence and
+ the current capacity.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Buffer -->
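+ <!-- Sketch of the count-versus-capacity distinction in Buffer (java.io imports
+      assumed):
+
+        Buffer buf = new Buffer();
+        buf.append("abc".getBytes("UTF-8"));
+        buf.getCount();        // 3: bytes of valid data
+        buf.getCapacity();     // at least 3: size of the backing storage
+        buf.truncate();        // shrink the capacity to the current count
+        buf.toString("UTF-8"); // "abc"
+ -->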
+ <!-- start class org.apache.hadoop.record.CsvRecordInput -->
+ <class name="CsvRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="CsvRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordInput -->
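+ <!-- Usage sketch for CsvRecordInput, using only the methods listed above.
+      Generated Record classes normally make these calls; when driving the
+      reader by hand, fields must be read in exactly the order they were
+      written. `csvBytes` is assumed to come from the matching
+      CsvRecordOutput sketch after the next class.
+
+      import java.io.ByteArrayInputStream;
+      import org.apache.hadoop.record.CsvRecordInput;
+
+      CsvRecordInput in =
+          new CsvRecordInput(new ByteArrayInputStream(csvBytes));
+      String name = in.readString("name");  // tags are ignored by untagged
+      int age = in.readInt("age");          // formats such as CSV
+ -->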
+ <!-- start class org.apache.hadoop.record.CsvRecordOutput -->
+ <class name="CsvRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="CsvRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordOutput -->
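+ <!-- The matching writer sketch for the reader above, again using only the
+      methods listed in this file. The startRecord/endRecord framing is
+      omitted here because it takes a Record instance; generated Record
+      subclasses supply it.
+
+      import java.io.ByteArrayOutputStream;
+      import org.apache.hadoop.record.CsvRecordOutput;
+
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      CsvRecordOutput out = new CsvRecordOutput(bytes);
+      out.writeString("Alice", "name");
+      out.writeInt(42, "age");
+      byte[] csvBytes = bytes.toByteArray();
+ -->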
+ <!-- start interface org.apache.hadoop.record.Index -->
+ <interface name="Index" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="done" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="incr"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Interface that acts as an iterator for deserializing vectors and maps.
+ The deserializer returns an Index instance that the record uses to
+ count off vector elements and map entries. An example of usage is as follows:
+
+ <code>
+ Index idx = startVector(...);
+ while (!idx.done()) {
+ .... // read element of a vector
+ idx.incr();
+ }
+ </code>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.Index -->
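+ <!-- The doc above shows the vector idiom; a map is walked the same way,
+      with one key read and one value read per entry. A sketch with
+      hypothetical tags, where `rin` is any RecordInput implementation
+      assumed to be in scope:
+
+      import java.util.TreeMap;
+      import org.apache.hadoop.record.Index;
+
+      TreeMap<String, Integer> scores = new TreeMap<String, Integer>();
+      Index idx = rin.startMap("scores");
+      while (!idx.done()) {
+        String k = rin.readString("key");  // key first ...
+        int v = rin.readInt("value");      // ... then its value
+        scores.put(k, v);
+        idx.incr();
+      }
+      rin.endMap("scores");
+ -->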
+ <!-- start class org.apache.hadoop.record.Record -->
+ <class name="Record" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Record"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="serialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record with a tag (usually the field name)
+ @param rout Record output destination
+ @param tag record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record with a tag (usually field name)
+ @param rin Record input source
+ @param tag Record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record without a tag
+ @param rout Record output destination]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record without a tag
+ @param rin Record input source]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="din" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Abstract class that is extended by generated classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Record -->
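+ <!-- Generated classes extend Record; a hand-rolled equivalent of what the
+      compiler emits for a two-field record might look like the sketch below
+      (hypothetical Person type; real generated code also emits comparator
+      plumbing and accessors).
+
+      import java.io.IOException;
+      import org.apache.hadoop.record.*;
+
+      public class Person extends Record {
+        public String name;
+        public int age;
+
+        public void serialize(RecordOutput rout, String tag) throws IOException {
+          rout.startRecord(this, tag);
+          rout.writeString(name, "name");
+          rout.writeInt(age, "age");
+          rout.endRecord(this, tag);
+        }
+
+        public void deserialize(RecordInput rin, String tag) throws IOException {
+          rin.startRecord(tag);
+          name = rin.readString("name");
+          age = rin.readInt("age");
+          rin.endRecord(tag);
+        }
+
+        public int compareTo(Object peer) throws ClassCastException {
+          Person p = (Person) peer;  // ClassCastException if not a Person
+          int c = name.compareTo(p.name);
+          return (c != 0) ? c : (age < p.age ? -1 : (age == p.age ? 0 : 1));
+        }
+      }
+ -->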
+ <!-- start class org.apache.hadoop.record.RecordComparator -->
+ <class name="RecordComparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordComparator" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a raw {@link Record} comparison implementation.]]>
+ </doc>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.record.RecordComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link Record} implementation.
+
+ @param c record class for which a raw comparator is provided
+ @param comparator Raw comparator instance for class c]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A raw record comparator base class]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.RecordComparator -->
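+ <!-- A sketch of wiring a raw comparator to the hypothetical Person record
+      above, following the define() contract. Comparing whole serialized byte
+      ranges with Utils.compareBytes (listed later in this package) is only
+      correct when it matches the record's binary encoding; it stands in here
+      for real field-by-field comparison logic.
+
+      import org.apache.hadoop.record.RecordComparator;
+      import org.apache.hadoop.record.Utils;
+
+      public class PersonComparator extends RecordComparator {
+        public PersonComparator() { super(Person.class); }
+
+        public int compare(byte[] b1, int s1, int l1,
+                           byte[] b2, int s2, int l2) {
+          return Utils.compareBytes(b1, s1, l1, b2, s2, l2);
+        }
+
+        static {
+          // Register the optimized comparator for Person.
+          RecordComparator.define(Person.class, new PersonComparator());
+        }
+      }
+ -->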
+ <!-- start interface org.apache.hadoop.record.RecordInput -->
+ <interface name="RecordInput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a byte from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a boolean from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a long integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a single-precision float from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a double-precision number from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read byte array from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of elements.]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of map entries.]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the Deserializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordInput -->
+ <!-- start interface org.apache.hadoop.record.RecordOutput -->
+ <interface name="RecordOutput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a byte to serialized record.
+ @param b Byte to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a boolean to serialized record.
+ @param b Boolean to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write an integer to serialized record.
+ @param i Integer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a long integer to serialized record.
+ @param l Long to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a single-precision float to serialized record.
+ @param f Float to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a double-precision floating-point number to serialized record.
+ @param d Double to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a Unicode string to serialized record.
+ @param s String to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a buffer to serialized record.
+ @param buf Buffer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a record to be serialized.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized record.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a vector to be serialized.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized vector.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a map to be serialized.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized map.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the serializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordOutput -->
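+ <!-- RecordInput/RecordOutput decouple records from wire formats, so the
+      same record round-trips through any reader/writer pair. A sketch
+      reusing the hypothetical Person record from above and the untagged
+      convenience methods of Record (assumes an enclosing method that
+      throws IOException):
+
+      import java.io.ByteArrayInputStream;
+      import java.io.ByteArrayOutputStream;
+      import org.apache.hadoop.record.*;
+
+      Person p = new Person();
+      p.name = "Alice";
+      p.age = 42;
+
+      ByteArrayOutputStream bos = new ByteArrayOutputStream();
+      p.serialize(new CsvRecordOutput(bos));   // serialize without a tag
+
+      Person q = new Person();
+      q.deserialize(new CsvRecordInput(
+          new ByteArrayInputStream(bos.toByteArray())));
+ -->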
+ <!-- start class org.apache.hadoop.record.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array containing the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+ <![CDATA[Get the encoded length when an integer is stored in a variable-length format
+ @return the encoded length]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used, holding the actual value.
+ For other values of i, the first byte indicates whether the
+ long is positive or negative, and how many bytes follow.
+ If the first byte value v is between -113 and -120, the long that follows
+ is positive, and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the long that follows
+ is negative, and the number of bytes that follow is -(v+120). Bytes are
+ stored in most-significant-nonzero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an int to a binary stream with zero-compressed encoding.
+
+ @param stream Binary output stream
+ @param i int to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <field name="hexchars" type="char[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Various utility functions for the Hadoop record I/O runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Utils -->
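+ <!-- A round-trip sketch for the zero-compressed encoding described under
+      writeVLong above: small values cost one byte, the largest nine.
+      (Snippet assumes an enclosing method that throws IOException.)
+
+      import java.io.*;
+      import org.apache.hadoop.record.Utils;
+
+      ByteArrayOutputStream bos = new ByteArrayOutputStream();
+      DataOutputStream dos = new DataOutputStream(bos);
+      Utils.writeVLong(dos, 127L);       // in [-112, 127]: a single byte
+      Utils.writeVLong(dos, 1000000L);   // 1 length byte + 3 payload bytes
+      System.out.println(Utils.getVIntSize(1000000L));   // prints 4
+
+      DataInput din = new DataInputStream(
+          new ByteArrayInputStream(bos.toByteArray()));
+      long a = Utils.readVLong(din);     // 127
+      long b = Utils.readVLong(din);     // 1000000
+ -->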
+ <!-- start class org.apache.hadoop.record.XmlRecordInput -->
+ <class name="XmlRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="XmlRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Deserializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordInput -->
+ <!-- start class org.apache.hadoop.record.XmlRecordOutput -->
+ <class name="XmlRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="XmlRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Serializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordOutput -->
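+ <!-- Per the RecordInput/RecordOutput docs, XML is the tagged serialization
+      format in this package, so the tag arguments actually matter here. A
+      hedged round-trip sketch (field names are illustrative; assumes an
+      enclosing method that throws IOException):
+
+      import java.io.ByteArrayInputStream;
+      import java.io.ByteArrayOutputStream;
+      import org.apache.hadoop.record.XmlRecordInput;
+      import org.apache.hadoop.record.XmlRecordOutput;
+
+      ByteArrayOutputStream bos = new ByteArrayOutputStream();
+      XmlRecordOutput xout = new XmlRecordOutput(bos);
+      xout.writeString("Alice", "name");
+      xout.writeInt(42, "age");
+
+      XmlRecordInput xin = new XmlRecordInput(
+          new ByteArrayInputStream(bos.toByteArray()));
+      String name = xin.readString("name");
+      int age = xin.readInt("age");
+ -->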
+</package>
+<package name="org.apache.hadoop.record.compiler">
+ <!-- start class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <class name="CodeBuffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A wrapper around StringBuffer that automatically handles indentation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.Consts -->
+ <class name="Consts" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="RIO_PREFIX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_VAR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER_FIELDS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_OUTPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_INPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TAG" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Constant definitions for the Record I/O compiler.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.Consts -->
+ <!-- start class org.apache.hadoop.record.compiler.JBoolean -->
+ <class name="JBoolean" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBoolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBoolean]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBoolean -->
+ <!-- start class org.apache.hadoop.record.compiler.JBuffer -->
+ <class name="JBuffer" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBuffer]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "buffer" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.JByte -->
+ <class name="JByte" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JByte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "byte" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JByte -->
+ <!-- start class org.apache.hadoop.record.compiler.JDouble -->
+ <class name="JDouble" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JDouble"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JDouble]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JDouble -->
+ <!-- start class org.apache.hadoop.record.compiler.JField -->
+ <class name="JField" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JField" type="java.lang.String, T"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JField]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[A thin wrapper around a record field.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JField -->
+ <!-- start class org.apache.hadoop.record.compiler.JFile -->
+ <class name="JFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFile" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JFile&gt;, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFile
+
+ @param name possibly full pathname to the file
+ @param inclFiles included files (as JFile)
+ @param recList List of records defined within this file]]>
+ </doc>
+ </constructor>
+ <method name="genCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <param name="destDir" type="java.lang.String"/>
+ <param name="options" type="java.util.ArrayList&lt;java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate record code in the given language. The language name
+ should be all lowercase.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Container for the Hadoop Record DDL.
+ The main components of the file are filename, list of included files,
+ and records defined in that file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFile -->
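+ <!-- A sketch of building a DDL model and invoking the compiler
+      programmatically via JFile.genCode(), matching the constructors listed
+      above; in practice the rcc tool or the RccTask ant task (below) parses
+      a .jr file and does this for you. (genCode throws IOException; error
+      handling omitted. "demo.Person" and the file names are illustrative.)
+
+      import java.util.ArrayList;
+      import org.apache.hadoop.record.compiler.*;
+
+      ArrayList<JField<JType>> fields = new ArrayList<JField<JType>>();
+      fields.add(new JField<JType>("name", new JString()));
+      fields.add(new JField<JType>("age", new JInt()));
+
+      ArrayList<JRecord> records = new ArrayList<JRecord>();
+      records.add(new JRecord("demo.Person", fields));
+
+      JFile jfile = new JFile("person.jr", new ArrayList<JFile>(), records);
+      jfile.genCode("java", "gensrc", new ArrayList<String>());  // lowercase language name
+ -->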
+ <!-- start class org.apache.hadoop.record.compiler.JFloat -->
+ <class name="JFloat" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFloat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFloat]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFloat -->
+ <!-- start class org.apache.hadoop.record.compiler.JInt -->
+ <class name="JInt" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JInt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JInt]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "int" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JInt -->
+ <!-- start class org.apache.hadoop.record.compiler.JLong -->
+ <class name="JLong" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JLong"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JLong]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "long" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JLong -->
+ <!-- start class org.apache.hadoop.record.compiler.JMap -->
+ <class name="JMap" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JMap" type="org.apache.hadoop.record.compiler.JType, org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JMap]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JMap -->
+ <!-- start class org.apache.hadoop.record.compiler.JRecord -->
+ <class name="JRecord" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JRecord" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JRecord]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JRecord -->
+ <!-- start class org.apache.hadoop.record.compiler.JString -->
+ <class name="JString" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JString"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JString]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JString -->
+ <!-- start class org.apache.hadoop.record.compiler.JType -->
+ <class name="JType" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Abstract base class for all types supported by Hadoop Record I/O.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JType -->
+ <!-- start class org.apache.hadoop.record.compiler.JVector -->
+ <class name="JVector" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JVector" type="org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JVector]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JVector -->
+</package>
+<package name="org.apache.hadoop.record.compiler.ant">
+ <!-- start class org.apache.hadoop.record.compiler.ant.RccTask -->
+ <class name="RccTask" extends="org.apache.tools.ant.Task"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RccTask"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of RccTask]]>
+ </doc>
+ </constructor>
+ <method name="setLanguage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the output language option
+ @param language "java"/"c++"]]>
+ </doc>
+ </method>
+ <method name="setFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets the record definition file attribute
+ @param file record definition file]]>
+ </doc>
+ </method>
+ <method name="setFailonerror"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="flag" type="boolean"/>
+ <doc>
+ <![CDATA[Given multiple files (via a fileset), sets the error-handling behavior
+ @param flag if true, throw a build exception on failure (the default)]]>
+ </doc>
+ </method>
+ <method name="setDestdir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets directory where output files will be generated
+ @param dir output directory]]>
+ </doc>
+ </method>
+ <method name="addFileset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="set" type="org.apache.tools.ant.types.FileSet"/>
+ <doc>
+ <![CDATA[Adds a fileset that can consist of one or more files
+ @param set Set of record definition files]]>
+ </doc>
+ </method>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="BuildException" type="org.apache.tools.ant.BuildException"/>
+ <doc>
+ <![CDATA[Invoke the Hadoop record compiler on each record definition file]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Hadoop record compiler ant Task
+<p> This task takes the given record definition files and compiles them into
+ java or c++
+ files. It is then up to the user to compile the generated files.
+
+ <p> The task requires the <code>file</code> or the nested fileset element to be
+ specified. Optional attributes are <code>language</code> (set the output
+ language, default is "java"),
+ <code>destdir</code> (name of the destination directory for generated java/c++
+ code, default is ".") and <code>failonerror</code> (specifies error handling
+ behavior; default is true).
+ <h4>Usage</h4>
+ <pre>
+ &lt;recordcc
+ destdir="${basedir}/gensrc"
+ language="java"&gt;
+ &lt;fileset include="**\/*.jr" /&gt;
+ &lt;/recordcc&gt;
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.ant.RccTask -->
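+ <!-- A minimal sketch of driving RccTask programmatically through the
+ setters documented above; the file names and the bare Project are
+ hypothetical, and inside a real Ant build the <recordcc> form shown in
+ the class doc is the normal entry point.
+
+ import java.io.File;
+ import org.apache.tools.ant.Project;
+ import org.apache.hadoop.record.compiler.ant.RccTask;
+
+ public class RccTaskSketch {
+   public static void main(String[] args) {
+     RccTask task = new RccTask();
+     task.setProject(new Project());      // tasks normally run inside Ant
+     task.setLanguage("java");            // output language, default "java"
+     task.setFile(new File("test.jr"));   // hypothetical record definition file
+     task.setDestdir(new File("gensrc")); // destination for generated code
+     task.setFailonerror(true);           // throw BuildException on failure
+     task.execute();                      // run the record compiler
+   }
+ }
+ -->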
+</package>
+<package name="org.apache.hadoop.record.compiler.generated">
+ <!-- start class org.apache.hadoop.record.compiler.generated.ParseException -->
+ <class name="ParseException" extends="java.lang.Exception"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ParseException" type="org.apache.hadoop.record.compiler.generated.Token, int[][], java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constructor is used by the method "generateParseException"
+ in the generated parser. Calling this constructor generates
+ a new object of this type with the fields "currentToken",
+ "expectedTokenSequences", and "tokenImage" set. The boolean
+ flag "specialConstructor" is also set to true to indicate that
+ this constructor was used to create this object.
+ This constructor calls its super class with the empty string
+ to force the "toString" method of parent class "Throwable" to
+ print the error message in the form:
+ ParseException: <result of getMessage>]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The following constructors are for use by you for whatever
+ purpose you can think of. Constructing the exception in this
+ manner makes the exception behave in the normal way - i.e., as
+ documented in the class "Throwable". The fields "errorToken",
+ "expectedTokenSequences", and "tokenImage" do not contain
+ relevant information. The JavaCC generated code does not use
+ these constructors.]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method has the standard behavior when this object has been
+ created using the standard constructors. Otherwise, it uses
+ "currentToken" and "expectedTokenSequences" to generate a parse
+ error message and returns it. If this object has been created
+ due to a parse error, and you do not catch it (it gets thrown
+ from the parser), then this method is called during the printing
+ of the final stack trace, and hence the correct error message
+ gets displayed.]]>
+ </doc>
+ </method>
+ <method name="add_escapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Used to convert raw characters to their escaped versions
+ when those raw versions cannot be used as part of an ASCII
+ string literal.]]>
+ </doc>
+ </method>
+ <field name="specialConstructor" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This variable determines which constructor was used to create
+ this object and thereby affects the semantics of the
+ "getMessage" method (see below).]]>
+ </doc>
+ </field>
+ <field name="currentToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is the last token that has been consumed successfully. If
+ this object has been created due to a parse error, the token
+ following this token will (therefore) be the first error token.]]>
+ </doc>
+ </field>
+ <field name="expectedTokenSequences" type="int[][]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Each entry in this array is an array of integers. Each array
+ of integers represents a sequence of tokens (by their ordinal
+ values) that is expected at this point of the parse.]]>
+ </doc>
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is a reference to the "tokenImage" array of the generated
+ parser within which the parse error occurred. This array is
+ defined in the generated ...Constants interface.]]>
+ </doc>
+ </field>
+ <field name="eol" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The end of line string for this machine.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This exception is thrown when parse errors are encountered.
+ You can explicitly create objects of this exception type by
+ calling the method generateParseException in the generated
+ parser.
+
+ You can modify this class to customize your error reporting
+ mechanisms so long as you retain the public fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.ParseException -->
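+ <!-- A minimal sketch of the catch-and-report pattern the javadoc above
+ describes: when the generated parser throws, getMessage() builds its text
+ from currentToken and expectedTokenSequences. The malformed DDL string is
+ hypothetical.
+
+ import java.io.ByteArrayInputStream;
+ import org.apache.hadoop.record.compiler.generated.ParseException;
+ import org.apache.hadoop.record.compiler.generated.Rcc;
+
+ public class ParseErrorSketch {
+   public static void main(String[] args) {
+     Rcc parser = new Rcc(new ByteArrayInputStream(
+         "module test { class }".getBytes()));
+     try {
+       parser.Input(); // parse a whole record definition file
+     } catch (ParseException e) {
+       System.err.println(e.getMessage()); // generated parse error message
+     }
+   }
+ }
+ -->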
+ <!-- start class org.apache.hadoop.record.compiler.generated.Rcc -->
+ <class name="Rcc" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="Rcc" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="usage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="driver" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="Input" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Include" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Module" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ModuleName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="RecordList" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Record" return="org.apache.hadoop.record.compiler.JRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Field" return="org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Type" return="org.apache.hadoop.record.compiler.JType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Map" return="org.apache.hadoop.record.compiler.JMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Vector" return="org.apache.hadoop.record.compiler.JVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tm" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"/>
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <method name="generateParseException" return="org.apache.hadoop.record.compiler.generated.ParseException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="enable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="disable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="token_source" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="token" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jj_nt" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Rcc -->
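+ <!-- A minimal sketch of reusing one Rcc instance across several DDL
+ files via ReInit(), as the method list above allows; the file names are
+ hypothetical.
+
+ import java.io.FileInputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.record.compiler.JFile;
+ import org.apache.hadoop.record.compiler.generated.ParseException;
+ import org.apache.hadoop.record.compiler.generated.Rcc;
+
+ public class RccReuseSketch {
+   public static void main(String[] args) throws IOException, ParseException {
+     Rcc parser = new Rcc(new FileInputStream("first.jr"));
+     JFile first = parser.Input();       // parse the first file
+     parser.ReInit(new FileInputStream("second.jr"));
+     JFile second = parser.Input();      // reuse the parser for another
+     System.out.println(first + " / " + second);
+   }
+ }
+ -->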
+ <!-- start interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <interface name="RccConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="EOF" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MODULE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCLUDE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BOOLEAN_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SEMICOLON_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CSTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IDENT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinOneLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinMultiLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
+ <class name="RccTokenManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setDebugStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ds" type="java.io.PrintStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="SwitchTo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="jjFillToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="debugStream" type="java.io.PrintStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjstrLiteralImages" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="lexStateNames" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjnewLexState" type="int[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="input_stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="curChar" type="char"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
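+ <!-- A minimal sketch of using the token manager on its own: wrap input
+ in a SimpleCharStream and pull tokens until EOF. All calls and fields are
+ from the listings above; the input string is hypothetical.
+
+ import java.io.StringReader;
+ import org.apache.hadoop.record.compiler.generated.RccConstants;
+ import org.apache.hadoop.record.compiler.generated.RccTokenManager;
+ import org.apache.hadoop.record.compiler.generated.SimpleCharStream;
+ import org.apache.hadoop.record.compiler.generated.Token;
+
+ public class TokenizeSketch {
+   public static void main(String[] args) {
+     SimpleCharStream stream =
+         new SimpleCharStream(new StringReader("module test {}"));
+     RccTokenManager tm = new RccTokenManager(stream);
+     for (Token t = tm.getNextToken();
+          t.kind != RccConstants.EOF;
+          t = tm.getNextToken()) {
+       System.out.println(t.kind + " \"" + t.image + "\"");
+     }
+   }
+ }
+ -->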
+ <!-- start class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
+ <class name="SimpleCharStream" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setTabSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="getTabSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="ExpandBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="wrapAround" type="boolean"/>
+ </method>
+ <method name="FillBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="BeginToken" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="UpdateLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="c" type="char"/>
+ </method>
+ <method name="readChar" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getEndColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getEndLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="backup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="amount" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="GetImage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="GetSuffix" return="char[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="int"/>
+ </method>
+ <method name="Done"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="adjustBeginLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newLine" type="int"/>
+ <param name="newCol" type="int"/>
+ <doc>
+ <![CDATA[Method to adjust line and column numbers for the start of a token.]]>
+ </doc>
+ </method>
+ <field name="staticFlag" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufpos" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufline" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufcolumn" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="column" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="line" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsCR" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsLF" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputStream" type="java.io.Reader"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="buffer" type="char[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="maxNextCharInd" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inBuf" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="tabSize" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of interface CharStream, where the stream is assumed to
+ contain only ASCII characters (without unicode processing).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
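+ <!-- A minimal sketch of the character-level protocol: BeginToken() marks
+ a token start, readChar() consumes, and GetImage() returns everything
+ read since the mark. The input and loop bound are hypothetical.
+
+ import java.io.IOException;
+ import java.io.StringReader;
+ import org.apache.hadoop.record.compiler.generated.SimpleCharStream;
+
+ public class CharStreamSketch {
+   public static void main(String[] args) throws IOException {
+     SimpleCharStream in = new SimpleCharStream(new StringReader("module"));
+     char c = in.BeginToken();    // marks the token start, reads one char
+     for (int i = 0; i < 5; i++) {
+       c = in.readChar();         // consume the remaining characters
+     }
+     System.out.println(in.GetImage()); // prints "module"
+     System.out.println(in.getBeginLine() + ":" + in.getBeginColumn());
+   }
+ }
+ -->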
+ <!-- start class org.apache.hadoop.record.compiler.generated.Token -->
+ <class name="Token" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Token"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the image.]]>
+ </doc>
+ </method>
+ <method name="newToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="ofKind" type="int"/>
+ <doc>
+ <![CDATA[Returns a new Token object, by default. However, if you want, you
+ can create and return subclass objects based on the value of ofKind.
+ Simply add the cases to the switch for all those special cases.
+ For example, if you have a subclass of Token called IDToken that
+ you want to create if ofKind is ID, simply add something like:
+
+ case MyParserConstants.ID : return new IDToken();
+
+ to the following switch statement. Then you can cast matchedToken
+ variable to the appropriate type and use it in your lexical actions.]]>
+ </doc>
+ </method>
+ <field name="kind" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[An integer that describes the kind of this token. This numbering
+ system is determined by JavaCCParser, and a table of these numbers is
+ stored in the file ...Constants.java.]]>
+ </doc>
+ </field>
+ <field name="beginLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="beginColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="image" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The string image of the token.]]>
+ </doc>
+ </field>
+ <field name="next" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A reference to the next regular (non-special) token from the input
+ stream. If this is the last token from the input stream, or if the
+ token manager has not read tokens beyond this one, this field is
+ set to null. This is true only if this token is also a regular
+ token. Otherwise, see below for a description of the contents of
+ this field.]]>
+ </doc>
+ </field>
+ <field name="specialToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This field is used to access special tokens that occur prior to this
+ token, but after the immediately preceding regular (non-special) token.
+ If there are no such special tokens, this field is set to null.
+ When there is more than one such special token, this field refers
+ to the last of these special tokens, which in turn refers to the next
+ previous special token through its specialToken field, and so on
+ until the first special token (whose specialToken field is null).
+ The next fields of special tokens refer to other special tokens that
+ immediately follow it (without an intervening regular token). If there
+ is no such token, this field is null.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Describes the input token stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Token -->
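+ <!-- A minimal sketch of the customization the newToken() javadoc
+ describes. Since newToken() is static it cannot be overridden; the doc
+ suggests editing the switch inside the generated Token.java, which the
+ standalone factory below imitates. IDToken is the hypothetical subclass
+ from the javadoc.
+
+ import org.apache.hadoop.record.compiler.generated.RccConstants;
+ import org.apache.hadoop.record.compiler.generated.Token;
+
+ class IDToken extends Token {
+   String canonical; // extra per-identifier data, purely illustrative
+ }
+
+ public class NewTokenSketch {
+   static Token newToken(int ofKind) {
+     switch (ofKind) {
+       case RccConstants.IDENT_TKN: return new IDToken();
+       default: return new Token();
+     }
+   }
+
+   public static void main(String[] args) {
+     System.out.println(newToken(RccConstants.IDENT_TKN) instanceof IDToken);
+   }
+ }
+ -->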
+ <!-- start class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
+ <class name="TokenMgrError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TokenMgrError"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="boolean, int, int, int, java.lang.String, char, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addEscapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Replaces unprintable characters by their escaped (or unicode escaped)
+ equivalents in the given string.]]>
+ </doc>
+ </method>
+ <method name="LexicalError" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="EOFSeen" type="boolean"/>
+ <param name="lexState" type="int"/>
+ <param name="errorLine" type="int"/>
+ <param name="errorColumn" type="int"/>
+ <param name="errorAfter" type="java.lang.String"/>
+ <param name="curChar" type="char"/>
+ <doc>
+ <![CDATA[Returns a detailed message for the Error when it is thrown by the
+ token manager to indicate a lexical error.
+ Parameters :
+ EOFSeen : indicates if EOF caused the lexical error
+ lexState : lexical state in which this error occurred
+ errorLine : line number where the error occurred
+ errorColumn : column number where the error occurred
+ errorAfter : prefix that was seen before this error occurred
+ curChar : the offending character
+ Note: You can customize the lexical error message by modifying this method.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[You can also modify the body of this method to customize your error messages.
+ For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not
+ of end users' concern, so you can return something like:
+
+ "Internal Error : Please file a bug report .... "
+
+ from this method for such cases in the release version of your parser.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
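+ <!-- A minimal sketch distinguishing the two failure channels: characters
+ the lexer has no rule for surface as TokenMgrError (an Error), while
+ grammar-level problems surface as ParseException. The offending input is
+ hypothetical, chosen in the hope of tripping the lexer.
+
+ import java.io.ByteArrayInputStream;
+ import org.apache.hadoop.record.compiler.generated.ParseException;
+ import org.apache.hadoop.record.compiler.generated.Rcc;
+ import org.apache.hadoop.record.compiler.generated.TokenMgrError;
+
+ public class LexicalErrorSketch {
+   public static void main(String[] args) {
+     Rcc parser = new Rcc(new ByteArrayInputStream("module @".getBytes()));
+     try {
+       parser.Input();
+     } catch (TokenMgrError e) {
+       System.err.println("lexical: " + e.getMessage());
+     } catch (ParseException e) {
+       System.err.println("syntactic: " + e.getMessage());
+     }
+   }
+ }
+ -->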
+</package>
+<package name="org.apache.hadoop.record.meta">
+ <!-- start class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <class name="FieldTypeInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's TypeID object]]>
+ </doc>
+ </method>
+ <method name="getFieldID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's id (name)]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two FieldTypeInfos are equal if each of their fields matches]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ti" type="org.apache.hadoop.record.meta.FieldTypeInfo"/>
+ </method>
+ <doc>
+ <![CDATA[Represents type information for a field, which is made up of its
+ ID (name) and its type (a TypeID object).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <!-- start class org.apache.hadoop.record.meta.MapTypeID -->
+ <class name="MapTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapTypeID" type="org.apache.hadoop.record.meta.TypeID, org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getKeyTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's key element]]>
+ </doc>
+ </method>
+ <method name="getValueTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's value element]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two map typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a Map]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.MapTypeID -->
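+ <!-- A minimal sketch of composing TypeIDs, using only constructors and
+ constants listed in this package: a map from string to vector of int.
+
+ import org.apache.hadoop.record.meta.MapTypeID;
+ import org.apache.hadoop.record.meta.TypeID;
+ import org.apache.hadoop.record.meta.VectorTypeID;
+
+ public class MapTypeIDSketch {
+   public static void main(String[] args) {
+     MapTypeID mapType = new MapTypeID(
+         TypeID.StringTypeID,                 // key type
+         new VectorTypeID(TypeID.IntTypeID)); // value type
+     System.out.println(mapType.getKeyTypeID().getTypeVal());
+     System.out.println(mapType.getValueTypeID().getTypeVal());
+   }
+ }
+ -->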
+ <!-- start class org.apache.hadoop.record.meta.RecordTypeInfo -->
+ <class name="RecordTypeInfo" extends="org.apache.hadoop.record.Record"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty RecordTypeInfo object.]]>
+ </doc>
+ </constructor>
+ <constructor name="RecordTypeInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a RecordTypeInfo object representing a record with the given name
+ @param name Name of the record]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the name of the record]]>
+ </doc>
+ </method>
+ <method name="setName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[set the name of the record]]>
+ </doc>
+ </method>
+ <method name="addField"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fieldName" type="java.lang.String"/>
+ <param name="tid" type="org.apache.hadoop.record.meta.TypeID"/>
+ <doc>
+ <![CDATA[Add a field.
+ @param fieldName Name of the field
+ @param tid Type ID of the field]]>
+ </doc>
+ </method>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a collection of field type infos]]>
+ </doc>
+ </method>
+ <method name="getNestedStructTypeInfo" return="org.apache.hadoop.record.meta.RecordTypeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the type info of a nested record. We only consider nesting
+ to one level.
+ @param name Name of the nested record]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer_" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ <doc>
+ <![CDATA[This class doesn't implement Comparable as it's not meant to be used
+ for anything besides de/serializing, so comparison is not meaningfully
+ implemented: a ClassCastException is thrown if the argument is not a
+ RecordTypeInfo, and 0 is returned otherwise.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A record's Type Information object which can read/write itself.
+
+ Type information for a record comprises metadata about the record,
+ as well as a collection of type information for each field in the record.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.RecordTypeInfo -->
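+ <!-- A minimal sketch of building a RecordTypeInfo and serializing it;
+ the record name and fields are hypothetical, and it assumes
+ org.apache.hadoop.record.BinaryRecordOutput accepts an OutputStream.
+
+ import java.io.ByteArrayOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.record.BinaryRecordOutput;
+ import org.apache.hadoop.record.meta.RecordTypeInfo;
+ import org.apache.hadoop.record.meta.TypeID;
+
+ public class RecordTypeInfoSketch {
+   public static void main(String[] args) throws IOException {
+     RecordTypeInfo info = new RecordTypeInfo("Employee");
+     info.addField("name", TypeID.StringTypeID);
+     info.addField("id", TypeID.IntTypeID);
+     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+     // RecordTypeInfo is itself a Record, so it can write itself out.
+     info.serialize(new BinaryRecordOutput(bytes), "Employee");
+     System.out.println(bytes.size() + " bytes of type information");
+   }
+ }
+ -->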
+ <!-- start class org.apache.hadoop.record.meta.StructTypeID -->
+ <class name="StructTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StructTypeID" type="org.apache.hadoop.record.meta.RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a StructTypeID based on the RecordTypeInfo of some record]]>
+ </doc>
+ </constructor>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a struct]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.StructTypeID -->
+ <!-- start class org.apache.hadoop.record.meta.TypeID -->
+ <class name="TypeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeVal" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type value. One of the constants in RIOType.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two base typeIDs are equal if they refer to the same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <field name="BoolTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constant classes for the basic types, so we can share them.]]>
+ </doc>
+ </field>
+ <field name="BufferTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ByteTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DoubleTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FloatTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IntTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LongTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="StringTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="typeVal" type="byte"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Represents typeID for basic types.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID -->
+ <!-- start class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <class name="TypeID.RIOType" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TypeID.RIOType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <field name="BOOL" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRUCT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[constants representing the IDL types we support]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <!-- start class org.apache.hadoop.record.meta.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <param name="typeID" type="org.apache.hadoop.record.meta.TypeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[read/skip bytes from stream based on a type]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Various utility functions for the Hadoop record I/O platform.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.Utils -->
+ <!-- start class org.apache.hadoop.record.meta.VectorTypeID -->
+ <class name="VectorTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VectorTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getElementTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two vector typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for vector.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.VectorTypeID -->
+</package>
+<package name="org.apache.hadoop.security">
+ <!-- start class org.apache.hadoop.security.AccessControlException -->
+ <class name="AccessControlException" extends="org.apache.hadoop.fs.permission.AccessControlException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AccessControlException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor is needed for unwrapping from
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[An exception class for access control related issues.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.AccessControlException -->
+ <!-- start class org.apache.hadoop.security.UnixUserGroupInformation -->
+ <class name="UnixUserGroupInformation" extends="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnixUserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameters user name and its group names.
+ The first entry in the groups list is the default group.
+
+ @param userName a user's name
+ @param groupNames groups list, first of which is the default group
+ @exception IllegalArgumentException if any argument is null]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameter user/group names
+
+ @param ugi an array containing user/group names, the first
+ element of which is the user name, the second of
+ which is the default group name.
+ @exception IllegalArgumentException if the array size is less than 2
+ or any element is null.]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Create an immutable {@link UnixUserGroupInformation} object.]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an array of group names]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the user's name]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Deserialize this object.
+ First check if this is a UGI in the string format.
+ If not, throw an IOException; otherwise
+ set this object's fields by reading them from the given data input.
+ 
+ @param in input stream
+ @exception IOException thrown if any error is encountered while reading]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Serialize this object.
+ First write a string marking that this is a UGI in the string format,
+ then write this object's serialized form to the given data output.
+ 
+ @param out output stream
+ @exception IOException if any error is encountered during writing]]>
+ </doc>
+ </method>
+ <method name="saveToConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <param name="ugi" type="org.apache.hadoop.security.UnixUserGroupInformation"/>
+ <doc>
+      <![CDATA[Store the given <code>ugi</code> as a comma separated string in
+ <code>conf</code> as a property named <code>attr</code>.
+ 
+ The string starts with the user name, followed by the default group name
+ and the other group names.
+
+ @param conf configuration
+ @param attr property name
+ @param ugi a UnixUserGroupInformation]]>
+ </doc>
+ </method>
+ <method name="readFromConf" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Read a UGI from the given <code>conf</code>
+
+ The object is expected to be stored with the property name <code>attr</code>
+ as a comma separated string that starts
+ with the user name followed by group names.
+ If the property name is not defined, return null.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the ugi in the map.
+ Otherwise, construct a UGI from the configuration, store it in the
+ ugi map and return it.
+
+ @param conf configuration
+ @param attr property name
+ @return a UnixUGI
+ @throws LoginException if the stored string is ill-formatted.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Get current user's name and the names of all its groups from Unix.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the ugi in the map.
+ Otherwise get the current user's information from Unix, store it
+ in the map, and return it.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Equivalent to login(conf, false).]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="save" type="boolean"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+      <![CDATA[Get a user's name & its group names from the given configuration.
+ If it is not defined in the configuration, get the current user's
+ information from Unix.
+ If the user has a UGI in the ugi map, return the one in
+ the UGI map.
+
+ @param conf either a job configuration or client's configuration
+ @param save saving it to conf?
+ @return UnixUserGroupInformation a user/group information
+ @exception LoginException if not able to get the user/group information]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Decide if two UGIs are the same
+
+ @param other other object
+ @return true if they are the same; false otherwise.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code for this UGI.
+ The hash code for a UGI is the hash code of its user name string.
+
+ @return a hash code value for this UGI.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this object to a string
+
+ @return a comma separated string containing the user name and group names]]>
+ </doc>
+ </method>
+ <field name="UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of UserGroupInformation in the Unix system]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UnixUserGroupInformation -->
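+ <!-- A minimal usage sketch for the class above (illustrative only, not part of
+      the generated API record; the Configuration is created locally):
+
+        import javax.security.auth.login.LoginException;
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.security.UnixUserGroupInformation;
+
+        public class UgiExample {
+          public static void main(String[] args) throws LoginException {
+            Configuration conf = new Configuration();
+            // Resolve the UGI from conf if present, else from Unix; save=true
+            // writes it back into conf for later readFromConf() calls.
+            UnixUserGroupInformation ugi = UnixUserGroupInformation.login(conf, true);
+            System.out.println(ugi.getUserName());
+            for (String group : ugi.getGroupNames()) {
+              System.out.println(group);
+            }
+          }
+        }
+ -->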
+ <!-- start class org.apache.hadoop.security.UserGroupInformation -->
+ <class name="UserGroupInformation" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="UserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCurrentUGI" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="setCurrentUGI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <doc>
+ <![CDATA[Set the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get username
+
+ @return the user's name]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Get the names of the groups that the user belongs to
+
+ @return an array of group names]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Login and return a UserGroupInformation object.]]>
+ </doc>
+ </method>
+ <method name="readFrom" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link UserGroupInformation} from conf]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Writable} abstract class for storing user and groups information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UserGroupInformation -->
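+ <!-- A sketch of the per-thread UGI pattern described above (illustrative only;
+      login() picks a concrete subclass such as UnixUserGroupInformation):
+
+        import javax.security.auth.login.LoginException;
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.security.UserGroupInformation;
+
+        public class CurrentUgiExample {
+          public static void main(String[] args) throws LoginException {
+            Configuration conf = new Configuration();
+            UserGroupInformation ugi = UserGroupInformation.login(conf);
+            // Publish the UGI for the current thread, then read it back.
+            UserGroupInformation.setCurrentUGI(ugi);
+            UserGroupInformation current = UserGroupInformation.getCurrentUGI();
+            System.out.println(current.getUserName());
+          }
+        }
+ -->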
+</package>
+<package name="org.apache.hadoop.tools">
+ <!-- start class org.apache.hadoop.tools.DistCp -->
+ <class name="DistCp" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="DistCp" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="destPath" type="java.lang.String"/>
+ <param name="logPath" type="org.apache.hadoop.fs.Path"/>
+ <param name="srcAsList" type="boolean"/>
+ <param name="ignoreReadFailures" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[This is the main driver for recursively copying directories
+ across file systems. It takes at least two command-line parameters: a source
+ URL and a destination URL. It then essentially does an "ls -lR" on the
+ source URL, and writes the output in a round-robin manner to all the map
+ input files. The mapper actually copies the files allotted to it. The
+ reduce is empty.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getRandomId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Map-reduce program to recursively copy directories between
+ different file-systems.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp -->
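+ <!-- A hedged launch sketch for DistCp (illustrative only; the source and
+      destination URIs below are made-up placeholders):
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.tools.DistCp;
+        import org.apache.hadoop.util.ToolRunner;
+
+        public class DistCpExample {
+          public static void main(String[] args) throws Exception {
+            Configuration conf = new Configuration();
+            // run() parses the command line and submits the copy job;
+            // the exit code is 0 on success.
+            int rc = ToolRunner.run(new DistCp(conf),
+                new String[] { "hdfs://nn1:8020/src", "hdfs://nn2:8020/dst" });
+            System.exit(rc);
+          }
+        }
+ -->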
+ <!-- start class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <class name="DistCp.DuplicationException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="ERROR_CODE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Error code for this exception]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An exception class for duplicated source files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <!-- start class org.apache.hadoop.tools.HadoopArchives -->
+ <class name="HadoopArchives" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="HadoopArchives" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="archive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPaths" type="java.util.List&lt;org.apache.hadoop.fs.Path&gt;"/>
+ <param name="archiveName" type="java.lang.String"/>
+ <param name="dest" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Archive the given source paths into
+ the dest.
+ @param srcPaths the src paths to be archived
+ @param archiveName the name of the archive to create
+ @param dest the dest dir that will contain the archive]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+      <![CDATA[The main driver for creating the archives.
+ It takes at least two command line parameters: the src and the
+ dest. It does an lsr on the source paths.
+ The mapper creates the archives and the reducer creates
+ the archive index.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+      <![CDATA[The main function.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[An archive creation utility.
+ This class provides methods that can be used
+ to create Hadoop archives. For an understanding of
+ Hadoop archives, look at {@link HarFileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.HadoopArchives -->
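+ <!-- A sketch of programmatic archive creation with the class above
+      (illustrative only; the paths and archive name are placeholders):
+
+        import java.util.Collections;
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.Path;
+        import org.apache.hadoop.tools.HadoopArchives;
+
+        public class HarExample {
+          public static void main(String[] args) throws Exception {
+            HadoopArchives har = new HadoopArchives(new Configuration());
+            // Pack everything under /user/logs into /user/archives/logs.har.
+            har.archive(Collections.singletonList(new Path("/user/logs")),
+                "logs.har", new Path("/user/archives"));
+          }
+        }
+ -->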
+ <!-- start class org.apache.hadoop.tools.Logalyzer -->
+ <class name="Logalyzer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Logalyzer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doArchive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logListURI" type="java.lang.String"/>
+ <param name="archiveDirectory" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doArchive: Workhorse function to archive log-files.
+ @param logListURI : The uri which will serve the list of log-files to archive.
+ @param archiveDirectory : The directory to store archived logfiles.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="doAnalyze"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFilesDirectory" type="java.lang.String"/>
+ <param name="outputDirectory" type="java.lang.String"/>
+ <param name="grepPattern" type="java.lang.String"/>
+ <param name="sortColumns" type="java.lang.String"/>
+ <param name="columnSeparator" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doAnalyze:
+ @param inputFilesDirectory : Directory containing the files to be analyzed.
+ @param outputDirectory : Directory to store analysis (output).
+ @param grepPattern : Pattern to *grep* for.
+ @param sortColumns : Sort specification for output.
+ @param columnSeparator : Column separator.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[Logalyzer: A utility tool for archiving and analyzing hadoop logs.
+ <p>
+ This tool supports archiving and analyzing (sort/grep) of log-files.
+ It takes as input
+ a) Input uri which will serve uris of the logs to be archived.
+ b) Output directory (not mandatory).
+ c) Directory on dfs to archive the logs.
+ d) The sort/grep patterns for analyzing the files and separator for boundaries.
+ Usage:
+ Logalyzer -archive -archiveDir <directory to archive logs> -analysis <directory> -logs <log-list uri> -grep <pattern> -sort <col1, col2> -separator <separator>
+ <p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <class name="Logalyzer.LogComparator" extends="org.apache.hadoop.io.Text.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Logalyzer.LogComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys of the logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+ <class name="Logalyzer.LogRegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="Logalyzer.LogRegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+</package>
+<package name="org.apache.hadoop.util">
+ <!-- start class org.apache.hadoop.util.Daemon -->
+ <class name="Daemon" extends="java.lang.Thread"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Daemon"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.ThreadGroup, java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread to be part of a specified thread group.]]>
+ </doc>
+ </constructor>
+ <method name="getRunnable" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+      <![CDATA[A thread that has called {@link Thread#setDaemon(boolean)} with true.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Daemon -->
+ <!-- start class org.apache.hadoop.util.DataChecksum -->
+ <class name="DataChecksum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.zip.Checksum"/>
+ <method name="newDataChecksum" return="org.apache.hadoop.util.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="int"/>
+ <param name="bytesPerChecksum" type="int"/>
+ </method>
+ <method name="newDataChecksum" return="org.apache.hadoop.util.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <doc>
+      <![CDATA[Creates a DataChecksum from HEADER_LEN bytes starting at bytes[offset].
+ @return DataChecksum of the type in the array or null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="newDataChecksum" return="org.apache.hadoop.util.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Constructs a DataChecksum by reading HEADER_LEN bytes from the
+ input stream <i>in</i>.]]>
+ </doc>
+ </method>
+ <method name="writeHeader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the checksum header to the output stream <i>out</i>.]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="writeValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="reset" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the current checksum to the stream.
+ If <i>reset</i> is true, then resets the checksum.
+ @return number of bytes written. Will be equal to getChecksumSize().]]>
+ </doc>
+ </method>
+ <method name="writeValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="reset" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the current checksum to a buffer.
+ If <i>reset</i> is true, then resets the checksum.
+ @return number of bytes written. Will be equal to getChecksumSize().]]>
+ </doc>
+ </method>
+ <method name="compare" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <doc>
+ <![CDATA[Compares the checksum located at buf[offset] with the current checksum.
+ @return true if the checksum matches and false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getChecksumType" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getChecksumSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesPerChecksum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumBytesInSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getChecksumHeaderSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getValue" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ </method>
+ <field name="HEADER_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_NULL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_CRC32" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SIZE_OF_INTEGER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[This class provides an interface and utilities for processing checksums for
+ DFS data transfers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.DataChecksum -->
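+ <!-- A checksum round-trip sketch built from the methods listed above
+      (illustrative only; the data and bytes-per-checksum value are arbitrary):
+
+        import org.apache.hadoop.util.DataChecksum;
+
+        public class ChecksumExample {
+          public static void main(String[] args) throws Exception {
+            byte[] data = "hello, world".getBytes("UTF-8");
+            DataChecksum sum = DataChecksum.newDataChecksum(
+                DataChecksum.CHECKSUM_CRC32, 512);
+            sum.update(data, 0, data.length);
+            // Serialize the checksum into a buffer, then verify it.
+            byte[] buf = new byte[sum.getChecksumSize()];
+            sum.writeValue(buf, 0, false);
+            System.out.println(sum.compare(buf, 0)); // expect true
+          }
+        }
+ -->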
+ <!-- start class org.apache.hadoop.util.DiskChecker -->
+ <class name="DiskChecker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mkdirsWithExistsCheck" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+      <![CDATA[The semantics of the mkdirsWithExistsCheck method differ from the mkdirs
+ method provided in Sun's java.io.File class in the following way:
+ While creating the non-existent parent directories, this method checks for
+ the existence of those directories if the mkdir fails at any point (since
+ that directory might have just been created by some other process).
+ If both mkdir() and the exists() check fail for any seemingly
+ non-existent directory, then we signal an error; Sun's mkdir would signal
+ an error (return false) if a directory it is attempting to create already
+ exists or the mkdir fails.
+ @param dir
+ @return true on success, false on failure]]>
+ </doc>
+ </method>
+ <method name="checkDir"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ </method>
+ <doc>
+      <![CDATA[Class that provides utility functions for checking disk problems]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker -->
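+ <!-- A sketch of the checkDir() usage implied above (illustrative only; the
+      directory path is a placeholder):
+
+        import java.io.File;
+        import org.apache.hadoop.util.DiskChecker;
+        import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+
+        public class DiskCheckExample {
+          public static void main(String[] args) {
+            try {
+              // Fails with DiskErrorException if the directory cannot be
+              // created or is not usable.
+              DiskChecker.checkDir(new File("/tmp/hadoop-local"));
+            } catch (DiskErrorException e) {
+              System.err.println("bad disk: " + e.getMessage());
+            }
+          }
+        }
+ -->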
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <class name="DiskChecker.DiskErrorException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskErrorException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <class name="DiskChecker.DiskOutOfSpaceException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskOutOfSpaceException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <!-- start class org.apache.hadoop.util.GenericOptionsParser -->
+ <class name="GenericOptionsParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Create a <code>GenericOptionsParser</code> to parse only the generic Hadoop
+ arguments.
+
+ The array of string arguments other than the generic arguments can be
+ obtained by {@link #getRemainingArgs()}.
+
+ @param conf the <code>Configuration</code> to modify.
+ @param args command-line arguments.]]>
+ </doc>
+ </constructor>
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, org.apache.commons.cli.Options, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a <code>GenericOptionsParser</code> to parse given options as well
+ as generic Hadoop options.
+
+ The resulting <code>CommandLine</code> object can be obtained by
+ {@link #getCommandLine()}.
+
+ @param conf the configuration to modify
+ @param options options built by the caller
+ @param args User-specified arguments]]>
+ </doc>
+ </constructor>
+ <method name="getRemainingArgs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array of Strings containing only application-specific arguments.
+
+ @return array of <code>String</code>s containing the un-parsed arguments
+ or <strong>empty array</strong> if commandLine was not defined.]]>
+ </doc>
+ </method>
+ <method name="getCommandLine" return="org.apache.commons.cli.CommandLine"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the commons-cli <code>CommandLine</code> object
+ to process the parsed arguments.
+
+ Note: If the object is created with
+ {@link #GenericOptionsParser(Configuration, String[])}, then the returned
+ object will only contain parsed generic options.
+
+ @return <code>CommandLine</code> representing list of arguments
+ parsed against Options descriptor.]]>
+ </doc>
+ </method>
+ <method name="getLibJars" return="java.net.URL[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[If libjars are set in the conf, parse the libjars.
+ @param conf
+ @return libjar urls
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Print the usage message for generic command-line options supported.
+
+ @param out stream to print the usage message to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>GenericOptionsParser</code> is a utility to parse command line
+ arguments generic to the Hadoop framework.
+
+ <code>GenericOptionsParser</code> recognizes several standard command
+ line arguments, enabling applications to easily specify a namenode, a
+ jobtracker, additional configuration resources etc.
+
+ <h4 id="GenericOptions">Generic Options</h4>
+
+ <p>The supported generic options are:</p>
+ <p><blockquote><pre>
+ -conf &lt;configuration file&gt; specify a configuration file
+ -D &lt;property=value&gt; use value for given property
+ -fs &lt;local|namenode:port&gt; specify a namenode
+ -jt &lt;local|jobtracker:port&gt; specify a job tracker
+ -files &lt;comma separated list of files&gt; specify comma separated
+ files to be copied to the map reduce cluster
+ -libjars &lt;comma separated list of jars&gt; specify comma separated
+ jar files to include in the classpath.
+ -archives &lt;comma separated list of archives&gt; specify comma
+ separated archives to be unarchived on the compute machines.
+
+ </pre></blockquote></p>
+
+ <p>The general command line syntax is:</p>
+ <p><tt><pre>
+ bin/hadoop command [genericOptions] [commandOptions]
+ </pre></tt></p>
+
+ <p>Generic command line arguments <strong>might</strong> modify
+ <code>Configuration</code> objects given to constructors.</p>
+
+ <p>The functionality is implemented using Commons CLI.</p>
+
+ <p>Examples:</p>
+ <p><blockquote><pre>
+ $ bin/hadoop dfs -fs darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -conf hadoop-site.xml -ls /data
+ list /data directory in dfs with conf specified in hadoop-site.xml
+
+ $ bin/hadoop job -D mapred.job.tracker=darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt local -submit job.xml
+ submit a job to local runner
+
+ $ bin/hadoop jar -libjars testlib.jar
+ -archives test.tgz -files file.txt inputjar args
+ job submission with libjars, files and archives
+ </pre></blockquote></p>
+
+ @see Tool
+ @see ToolRunner]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericOptionsParser -->
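+ <!-- A minimal sketch of the parsing flow described above (illustrative only):
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.util.GenericOptionsParser;
+
+        public class ParseExample {
+          public static void main(String[] args) throws Exception {
+            Configuration conf = new Configuration();
+            // Generic options such as -D and -fs are applied to conf;
+            // whatever is left over belongs to the application.
+            GenericOptionsParser parser = new GenericOptionsParser(conf, args);
+            for (String remaining : parser.getRemainingArgs()) {
+              System.out.println(remaining);
+            }
+          }
+        }
+ -->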
+ <!-- start class org.apache.hadoop.util.GenericsUtil -->
+ <class name="GenericsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericsUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClass" return="java.lang.Class&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <doc>
+ <![CDATA[Returns the Class object (of type <code>Class&lt;T&gt;</code>) of the
+ argument of type <code>T</code>.
+ @param <T> The type of the argument
+ @param t the object to get its class
+ @return <code>Class&lt;T&gt;</code>]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+      <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param c the Class object of the items in the list
+ @param list the list to convert]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+      <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param list the list to convert
+ @throws ArrayIndexOutOfBoundsException if the list is empty.
+ Use {@link #toArray(Class, List)} if the list may be empty.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Contains utility methods for dealing with Java Generics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericsUtil -->
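+ <!-- A toArray() sketch for the class above (illustrative only):
+
+        import java.util.Arrays;
+        import java.util.List;
+        import org.apache.hadoop.util.GenericsUtil;
+
+        public class ToArrayExample {
+          public static void main(String[] args) {
+            List<String> list = Arrays.asList("a", "b", "c");
+            // Works because the element class is taken from the first item;
+            // use toArray(Class, List) when the list may be empty.
+            String[] array = GenericsUtil.toArray(list);
+            System.out.println(array.length);
+          }
+        }
+ -->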
+ <!-- start class org.apache.hadoop.util.HeapSort -->
+ <class name="HeapSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="HeapSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using heap sort.
+ {@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of HeapSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.HeapSort -->
+ <!-- start class org.apache.hadoop.util.HostsFileReader -->
+ <class name="HostsFileReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HostsFileReader" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="refresh"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExcludedHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setIncludesFile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="includesFile" type="java.lang.String"/>
+ </method>
+ <method name="setExcludesFile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="excludesFile" type="java.lang.String"/>
+ </method>
+ <method name="updateFileNames"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="includesFile" type="java.lang.String"/>
+ <param name="excludesFile" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.util.HostsFileReader -->
+ <!-- start interface org.apache.hadoop.util.IndexedSortable -->
+ <interface name="IndexedSortable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Compare items at the given addresses consistent with the semantics of
+ {@link java.util.Comparator#compare}.]]>
+ </doc>
+ </method>
+ <method name="swap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Swap items at the given addresses.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for collections capable of being sorted by {@link IndexedSorter}
+ algorithms.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSortable -->
+ <!-- start interface org.apache.hadoop.util.IndexedSorter -->
+ <interface name="IndexedSorter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the items accessed through the given IndexedSortable over the given
+ range of logical indices. From the perspective of the sort algorithm,
+ each index between l (inclusive) and r (exclusive) is an addressable
+ entry.
+ @see IndexedSortable#compare
+ @see IndexedSortable#swap]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Same as {@link #sort(IndexedSortable,int,int)}, but indicate progress
+ periodically.
+ @see #sort(IndexedSortable,int,int)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for sort algorithms accepting {@link IndexedSortable} items.
+
+ A sort algorithm implementing this interface may only
+ {@link IndexedSortable#compare} and {@link IndexedSortable#swap} items
+ for a range of indices to effect a sort across that range.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSorter -->
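+ <!-- A sketch tying IndexedSortable to an IndexedSorter implementation such as
+      the HeapSort class described earlier (illustrative only):
+
+        import org.apache.hadoop.util.HeapSort;
+        import org.apache.hadoop.util.IndexedSortable;
+
+        public class SortExample {
+          public static void main(String[] args) {
+            final int[] data = { 3, 1, 2 };
+            IndexedSortable sortable = new IndexedSortable() {
+              public int compare(int i, int j) {
+                return data[i] - data[j];
+              }
+              public void swap(int i, int j) {
+                int tmp = data[i]; data[i] = data[j]; data[j] = tmp;
+              }
+            };
+            // Sort indices [0, data.length): l inclusive, r exclusive.
+            new HeapSort().sort(sortable, 0, data.length);
+            for (int v : data) System.out.println(v);
+          }
+        }
+ -->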
+ <!-- start class org.apache.hadoop.util.LineReader -->
+ <class name="LineReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LineReader" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ default buffer-size (64k).
+ @param in The input stream
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="LineReader" type="java.io.InputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ given buffer-size.
+ @param in The input stream
+ @param bufferSize Size of the read buffer
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ <code>io.file.buffer.size</code> specified in the given
+ <code>Configuration</code>.
+ @param in input stream
+ @param conf configuration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the underlying stream.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <param name="maxBytesToConsume" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @param maxLineLength the maximum number of bytes to store into str.
+ @param maxBytesToConsume the maximum number of bytes to consume in this call.
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @param maxLineLength the maximum number of bytes to store into str.
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides a line reader from an input stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.LineReader -->
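+ <!-- A line-by-line read sketch for the class above (illustrative only; the
+      input bytes are a local stand-in for a real stream):
+
+        import java.io.ByteArrayInputStream;
+        import org.apache.hadoop.io.Text;
+        import org.apache.hadoop.util.LineReader;
+
+        public class LineReaderExample {
+          public static void main(String[] args) throws Exception {
+            byte[] input = "first\nsecond\n".getBytes("UTF-8");
+            LineReader reader = new LineReader(new ByteArrayInputStream(input));
+            Text line = new Text();
+            // readLine() returns the byte count including the newline,
+            // and 0 at end of stream.
+            while (reader.readLine(line) > 0) {
+              System.out.println(line);
+            }
+            reader.close();
+          }
+        }
+ -->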
+ <!-- start class org.apache.hadoop.util.MergeSort -->
+ <class name="MergeSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MergeSort" type="java.util.Comparator&lt;org.apache.hadoop.io.IntWritable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mergeSort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="int[]"/>
+ <param name="dest" type="int[]"/>
+ <param name="low" type="int"/>
+ <param name="high" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of MergeSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.MergeSort -->
+ <!-- start class org.apache.hadoop.util.NativeCodeLoader -->
+ <class name="NativeCodeLoader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeCodeLoader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeCodeLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if native-hadoop code is loaded for this platform.
+
+ @return <code>true</code> if native-hadoop is loaded,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getLoadNativeLibraries" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Return whether native hadoop libraries, if present, can be used for this job.
+ @param conf configuration
+
+ @return <code>true</code> if native hadoop libraries, if present, can be
+ used for this job; <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setLoadNativeLibraries"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="loadNativeLibraries" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether native hadoop libraries, if present, can be used for this job.
+
+ @param conf configuration
+ @param loadNativeLibraries can native hadoop libraries be loaded]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A helper to load the native hadoop code i.e. libhadoop.so.
+ This handles the fallback to either the bundled libhadoop-Linux-i386-32.so
+ or the default java implementations where appropriate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.NativeCodeLoader -->
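+ <!-- Editor's sketch (not part of the generated description): probing for the
+ native library and disabling it for a job; note that per the signatures above
+ only isNativeCodeLoaded is static.
+
+ Configuration conf = new Configuration();
+ if (!NativeCodeLoader.isNativeCodeLoaded()) {
+   new NativeCodeLoader().setLoadNativeLibraries(conf, false); // use the Java fallbacks
+ }
+ -->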
+ <!-- start class org.apache.hadoop.util.PlatformName -->
+ <class name="PlatformName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PlatformName"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPlatformName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete platform as per the java-vm.
+ @return the complete platform as per the java-vm.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[A helper class for getting build-info of the java-vm.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PlatformName -->
+ <!-- start class org.apache.hadoop.util.PrintJarMainClass -->
+ <class name="PrintJarMainClass" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PrintJarMainClass"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A micro-application that prints the main class name out of a jar file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PrintJarMainClass -->
+ <!-- start class org.apache.hadoop.util.PriorityQueue -->
+ <class name="PriorityQueue" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PriorityQueue"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="lessThan" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Determines the ordering of objects in this priority queue. Subclasses
+ must define this one method.]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="maxSize" type="int"/>
+ <doc>
+ <![CDATA[Subclass constructors must call this.]]>
+ </doc>
+ </method>
+ <method name="put"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="T"/>
+ <doc>
+ <![CDATA[Adds an Object to a PriorityQueue in log(size) time.
+ If one tries to add more objects than the maxSize passed to initialize,
+ a RuntimeException (ArrayIndexOutOfBoundsException) is thrown.]]>
+ </doc>
+ </method>
+ <method name="insert" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="T"/>
+ <doc>
+ <![CDATA[Adds element to the PriorityQueue in log(size) time if either
+ the PriorityQueue is not full, or not lessThan(element, top()).
+ @param element
+ @return true if element is added, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="top" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the least element of the PriorityQueue in constant time.]]>
+ </doc>
+ </method>
+ <method name="pop" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes and returns the least element of the PriorityQueue in log(size)
+ time.]]>
+ </doc>
+ </method>
+ <method name="adjustTop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should be called when the Object at top changes values. Still log(n)
+ worst case, but it's at least twice as fast to <pre>
+ { pq.top().change(); pq.adjustTop(); }
+ </pre> instead of <pre>
+ { o = pq.pop(); o.change(); pq.put(o); }
+ </pre>]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of elements currently stored in the PriorityQueue.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes all entries from the PriorityQueue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A PriorityQueue maintains a partial ordering of its elements such that the
+ least element can always be found in constant time. Put()'s and pop()'s
+ require log(size) time.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PriorityQueue -->
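+ <!-- Editor's sketch (not part of the generated description): a minimal
+ concrete subclass; lessThan supplies the ordering and the constructor must
+ call initialize, as the method docs above require.
+
+ class IntQueue extends PriorityQueue<Integer> {
+   IntQueue(int maxSize) { initialize(maxSize); }
+   protected boolean lessThan(Object a, Object b) {
+     return (Integer) a < (Integer) b;
+   }
+ }
+
+ IntQueue q = new IntQueue(8);
+ q.put(5); q.put(2); q.put(7);
+ int least = q.pop(); // 2; top() and pop() always yield the least element
+ -->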
+ <!-- start class org.apache.hadoop.util.ProcfsBasedProcessTree -->
+ <class name="ProcfsBasedProcessTree" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ProcfsBasedProcessTree" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setSigKillInterval"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="interval" type="long"/>
+ </method>
+ <method name="isAvailable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Checks if the ProcfsBasedProcessTree is available on this system.
+
+ @return true if ProcfsBasedProcessTree is available, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getProcessTree" return="org.apache.hadoop.util.ProcfsBasedProcessTree"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the process-tree with the latest state.
+
+ @return the process-tree with the latest state.]]>
+ </doc>
+ </method>
+ <method name="isAlive" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the process-tree alive? Currently we care only about the status of the
+ root-process.
+
+ @return true if the process-tree is alive, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="destroy"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Destroy the process-tree. Currently we only make sure the root process is
+ gone. It is the responsibility of the root process to make sure that all
+ its descendants are cleaned up.]]>
+ </doc>
+ </method>
+ <method name="getCumulativeVmem" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the cumulative virtual memory used by all the processes in the
+ process-tree.
+
+ @return cumulative virtual memory used by the process-tree in kilobytes.]]>
+ </doc>
+ </method>
+ <method name="getPidFromPidFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pidFileName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get PID from a pid-file.
+
+ @param pidFileName
+ Name of the pid-file.
+ @return the PID string read from the pid-file. Returns null if the
+ pidFileName points to a non-existing file or if reading the file
+ fails.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string listing the PIDs of the processes present in the
+ ProcfsBasedProcessTree. Output format: [pid pid ..]]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_SLEEPTIME_BEFORE_SIGKILL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Proc file-system based ProcessTree. Works only on Linux.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ProcfsBasedProcessTree -->
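+ <!-- Editor's sketch (not part of the generated description): monitoring a
+ process-tree rooted at a pid (Linux only, per the class doc); "4231" is a
+ placeholder pid.
+
+ if (ProcfsBasedProcessTree.isAvailable()) {
+   ProcfsBasedProcessTree tree = new ProcfsBasedProcessTree("4231");
+   tree = tree.getProcessTree();               // refresh to the latest state
+   if (tree.isAlive()) {
+     long vmemKB = tree.getCumulativeVmem();   // virtual memory, in kilobytes
+   }
+ }
+ -->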
+ <!-- start class org.apache.hadoop.util.ProgramDriver -->
+ <class name="ProgramDriver" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ProgramDriver"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="mainClass" type="java.lang.Class"/>
+ <param name="description" type="java.lang.String"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[This is the method that adds the classes to the repository
+ @param name The name that the class should be invoked with
+ @param mainClass The class that you want to add to the repository
+ @param description The description of the class
+ @throws NoSuchMethodException
+ @throws SecurityException]]>
+ </doc>
+ </method>
+ <method name="driver"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[This is a driver for the example programs.
+ It looks at the first command line argument and tries to find an
+ example program with that name.
+ If it is found, it calls the main method in that class with the rest
+ of the command line arguments.
+ @param args The arguments from the user. args[0] is the command to run.
+ @throws NoSuchMethodException
+ @throws SecurityException
+ @throws IllegalAccessException
+ @throws IllegalArgumentException
+ @throws Throwable Anything thrown by the example program's main]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A driver that is used to run programs added to it]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ProgramDriver -->
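+ <!-- Editor's sketch (not part of the generated description): registering
+ programs and dispatching on args[0]; WordCount is a hypothetical class with
+ a main method.
+
+ public static void main(String[] args) throws Throwable {
+   ProgramDriver pgd = new ProgramDriver();
+   pgd.addClass("wordcount", WordCount.class, "counts the words in the input");
+   pgd.driver(args); // runs the program named by args[0] with the remaining args
+ }
+ -->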
+ <!-- start class org.apache.hadoop.util.Progress -->
+ <class name="Progress" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Progress"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new root node.]]>
+ </doc>
+ </constructor>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a named node to the tree.]]>
+ </doc>
+ </method>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds a node to the tree.]]>
+ </doc>
+ </method>
+ <method name="startNextPhase"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Called during execution to move to the next phase at this level in the
+ tree.]]>
+ </doc>
+ </method>
+ <method name="phase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the currently executing sub-node.]]>
+ </doc>
+ </method>
+ <method name="complete"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Completes this node, moving the parent node to its next child.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progress" type="float"/>
+ <doc>
+ <![CDATA[Called during execution on a leaf node to set its progress.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the overall progress of the root.]]>
+ </doc>
+ </method>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Utility to assist with generation of progress reports. Applications build
+ a hierarchy of {@link Progress} instances, each modelling a phase of
+ execution. The root is constructed with {@link #Progress()}. Nodes for
+ sub-phases are created by calling {@link #addPhase()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Progress -->
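+ <!-- Editor's sketch (not part of the generated description): a two-phase
+ progress tree; assuming phases are weighted equally, a leaf at 0.5 puts the
+ root at 0.25.
+
+ Progress root = new Progress();
+ Progress copy = root.addPhase("copy");
+ Progress sort = root.addPhase("sort");
+ copy.set(0.5f);             // set leaf progress
+ float overall = root.get(); // 0.25f under the equal-weight assumption
+ copy.complete();            // the parent moves on to the "sort" phase
+ -->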
+ <!-- start interface org.apache.hadoop.util.Progressable -->
+ <interface name="Progressable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Report progress to the Hadoop framework.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for reporting progress.
+
+ <p>Clients and/or applications can use the provided <code>Progressable</code>
+ to explicitly report progress to the Hadoop framework. This is especially
+ important for operations which take a significant amount of time since,
+ in lieu of the reported progress, the framework has to assume that an error
+ has occurred and time out the operation.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Progressable -->
+ <!-- start class org.apache.hadoop.util.QuickSort -->
+ <class name="QuickSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="QuickSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMaxDepth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="x" type="int"/>
+ <doc>
+ <![CDATA[Deepest recursion before giving up and doing a heapsort.
+ Returns 2 * ceil(log(n)).]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using quick sort.
+ {@inheritDoc} If the recursion depth falls below {@link #getMaxDepth},
+ then switch to {@link HeapSort}.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of QuickSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.QuickSort -->
+ <!-- start class org.apache.hadoop.util.ReflectionUtils -->
+ <class name="ReflectionUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReflectionUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theObject" type="java.lang.Object"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check and set 'configuration' if necessary.
+
+ @param theObject object for which to set configuration
+ @param conf Configuration]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;T&gt;"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create an object for the given class and initialize it from conf
+
+ @param theClass class of which an object is created
+ @param conf Configuration
+ @return a new object]]>
+ </doc>
+ </method>
+ <method name="setContentionTracing"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="boolean"/>
+ </method>
+ <method name="printThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.PrintWriter"/>
+ <param name="title" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Print all of the threads' information and stack traces.
+
+ @param stream the stream to write to
+ @param title a string title for the stack trace]]>
+ </doc>
+ </method>
+ <method name="logThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="title" type="java.lang.String"/>
+ <param name="minInterval" type="long"/>
+ <doc>
+ <![CDATA[Log the current thread stacks at INFO level.
+ @param log the logger that logs the stack trace
+ @param title a descriptive title for the call stacks
+ @param minInterval the minimum time since thread info was last logged]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="T"/>
+ <doc>
+ <![CDATA[Return the correctly-typed {@link Class} of the given object.
+
+ @param o object whose correctly-typed <code>Class</code> is to be obtained
+ @return the correctly typed <code>Class</code> of the given object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[General reflection utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ReflectionUtils -->
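+ <!-- Editor's sketch (not part of the generated description): newInstance
+ creates an object via its no-arg constructor and, per setConf above, wires in
+ the Configuration where applicable; MyMapper is a hypothetical class.
+
+ Configuration conf = new Configuration();
+ MyMapper mapper = ReflectionUtils.newInstance(MyMapper.class, conf);
+ -->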
+ <!-- start class org.apache.hadoop.util.RunJar -->
+ <class name="RunJar" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RunJar"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="unJar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jarFile" type="java.io.File"/>
+ <param name="toDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unpack a jar file into a directory.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Run a Hadoop job jar. If the main class is not in the jar's manifest,
+ then it must be provided on the command line.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Run a Hadoop job jar.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.RunJar -->
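+ <!-- Editor's sketch (not part of the generated description): unpacking a job
+ jar; both paths are placeholders.
+
+ RunJar.unJar(new File("/tmp/job.jar"), new File("/tmp/unjar"));
+ -->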
+ <!-- start class org.apache.hadoop.util.ServletUtil -->
+ <class name="ServletUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ServletUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initHTML" return="java.io.PrintWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="response" type="javax.servlet.ServletResponse"/>
+ <param name="title" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initial HTML header]]>
+ </doc>
+ </method>
+ <method name="getParameter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.ServletRequest"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a parameter from a ServletRequest.
+ Return null if the parameter contains only whitespace.]]>
+ </doc>
+ </method>
+ <method name="htmlFooter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[HTML footer to be added in the jsps.
+ @return the HTML footer.]]>
+ </doc>
+ </method>
+ <method name="percentageGraph" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="perc" type="int"/>
+ <param name="width" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generates the percentage graph and returns its HTML
+ representation as a string.
+
+ @param perc The percentage value for which graph is to be generated
+ @param width The width of the display table
+ @return HTML String representation of the percentage graph
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="percentageGraph" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="perc" type="float"/>
+ <param name="width" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generates the percentage graph and returns its HTML
+ representation as a string.
+ @param perc The percentage value for which graph is to be generated
+ @param width The width of the display table
+ @return HTML String representation of the percentage graph
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="HTML_TAIL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.util.ServletUtil -->
+ <!-- start class org.apache.hadoop.util.Shell -->
+ <class name="Shell" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param interval the minimum duration to wait before re-executing the
+ command.]]>
+ </doc>
+ </constructor>
+ <method name="getGROUPS_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's groups list]]>
+ </doc>
+ </method>
+ <method name="getGET_PERMISSION_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a Unix command to get permission information.]]>
+ </doc>
+ </method>
+ <method name="getUlimitMemoryCommand" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the Unix command for setting the maximum virtual memory available
+ to a given child process. This is only relevant when we are forking a
+ process from within the {@link org.apache.hadoop.mapred.Mapper} or the
+ {@link org.apache.hadoop.mapred.Reducer} implementations
+ e.g. <a href="{@docRoot}/org/apache/hadoop/mapred/pipes/package-summary.html">Hadoop Pipes</a>
+ or <a href="{@docRoot}/org/apache/hadoop/streaming/package-summary.html">Hadoop Streaming</a>.
+
+ It also checks that we are running on a *nix platform; otherwise
+ (e.g. in Cygwin/Windows) it returns <code>null</code>.
+ @param conf configuration
+ @return a <code>String[]</code> with the ulimit command arguments or
+ <code>null</code> if we are running on a non *nix platform or
+ if the limit is unspecified.]]>
+ </doc>
+ </method>
+ <method name="setEnvironment"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="env" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[set the environment for the command
+ @param env Mapping of environment variables]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[set the working directory
+ @param dir The directory where the command would be executed]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[check to see if a command needs to be executed and execute if needed]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return an array containing the command name & its parameters]]>
+ </doc>
+ </method>
+ <method name="parseExecResult"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the execution result]]>
+ </doc>
+ </method>
+ <method name="getProcess" return="java.lang.Process"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the current sub-process executing the given command
+ @return process executing the command]]>
+ </doc>
+ </method>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the exit code
+ @return the exit code of the process]]>
+ </doc>
+ </method>
+ <method name="execCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Static method to execute a shell command.
+ Covers most of the simple cases without requiring the user to implement
+ the <code>Shell</code> interface.
+ @param cmd shell command to execute.
+ @return the output of the executed command.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USER_NAME_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's name]]>
+ </doc>
+ </field>
+ <field name="SET_PERMISSION_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set permission]]>
+ </doc>
+ </field>
+ <field name="SET_OWNER_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set owner]]>
+ </doc>
+ </field>
+ <field name="SET_GROUP_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WINDOWS" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set to true on Windows platforms]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A base class for running a Unix command.
+
+ <code>Shell</code> can be used to run unix commands like <code>du</code> or
+ <code>df</code>. It also offers facilities to gate commands by
+ time-intervals.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell -->
+ <!-- start class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <class name="Shell.ExitCodeException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ExitCodeException" type="int, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is an IOException with exit code added.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <!-- start class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
+ <class name="Shell.ShellCommandExecutor" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File, java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the shell command.]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getOutput" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the output of the shell command.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the commands of this instance.
+ Arguments containing spaces are presented in quotes; other
+ arguments are presented raw.
+
+ @return a string representation of the object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple shell command executor.
+
+ <code>ShellCommandExecutor</code> should be used in cases where the output
+ of the command needs no explicit parsing and where the command, working
+ directory and the environment remain unchanged. The output of the command
+ is stored as-is and is expected to be small.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
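+ <!-- Editor's sketch (not part of the generated description): the one-shot
+ static helper versus a reusable executor; failures surface as IOException,
+ and the Shell.ExitCodeException documented above carries the exit code.
+
+ String whoami = Shell.execCommand(new String[] {"whoami"});
+
+ Shell.ShellCommandExecutor exec = new Shell.ShellCommandExecutor(
+     new String[] {"ls", "-l"}, new File("/tmp"));
+ exec.execute();
+ String listing = exec.getOutput();
+ -->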
+ <!-- start class org.apache.hadoop.util.StringUtils -->
+ <class name="StringUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StringUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stringifyException" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Make a string representation of the exception.
+ @param e The exception to stringify
+ @return A string with exception name and call stack.]]>
+ </doc>
+ </method>
+ <method name="simpleHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fullHostname" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a full hostname, return the word up to the first dot.
+ @param fullHostname the full hostname
+ @return the hostname to the first dot]]>
+ </doc>
+ </method>
+ <method name="humanReadableInt" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="number" type="long"/>
+ <doc>
+ <![CDATA[Given an integer, return a string in an approximate, but
+ human-readable, format.
+ It uses the bases 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3.
+ @param number the number to format
+ @return a human readable form of the integer]]>
+ </doc>
+ </method>
+ <method name="formatPercent" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="done" type="double"/>
+ <param name="digits" type="int"/>
+ <doc>
+ <![CDATA[Format a percentage for presentation to the user.
+ @param done the percentage to format (0.0 to 1.0)
+ @param digits the number of digits past the decimal point
+ @return a string representation of the percentage]]>
+ </doc>
+ </method>
+ <method name="arrayToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strs" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Given an array of strings, return a comma-separated list of its elements.
+ @param strs Array of strings
+ @return Empty string if strs.length is 0, comma separated list of strings
+ otherwise]]>
+ </doc>
+ </method>
+ <method name="byteToHexString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <doc>
+ <![CDATA[Given an array of bytes, convert the bytes to a hex string
+ representation.
+ @param bytes
+ @param start start index, inclusive
+ @param end end index, exclusive
+ @return hex string representation of the byte array]]>
+ </doc>
+ </method>
+ <method name="byteToHexString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Same as byteToHexString(bytes, 0, bytes.length).]]>
+ </doc>
+ </method>
+ <method name="hexStringToByte" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a hex string, return the byte array corresponding to the
+ string.
+ @param hex the hex string
+ @return the byte array decoded from the hex string. The size of the
+ byte array is therefore hex.length/2]]>
+ </doc>
+ </method>
+ <method name="uriToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uris" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[@param uris]]>
+ </doc>
+ </method>
+ <method name="stringToURI" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param str]]>
+ </doc>
+ </method>
+ <method name="stringToPath" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param str]]>
+ </doc>
+ </method>
+ <method name="formatTimeDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+ <![CDATA[Given a finish and start time in long milliseconds, returns a
+ String in the format Xhrs, Ymins, Z sec, for the time difference between the two times.
+ If the finish time comes before the start time, then negative values of X, Y and Z
+ will be returned.
+
+ @param finishTime finish time
+ @param startTime start time]]>
+ </doc>
+ </method>
+ <method name="formatTime" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="timeDiff" type="long"/>
+ <doc>
+ <![CDATA[Given the time in long milliseconds, returns a
+ String in the format Xhrs, Ymins, Z sec.
+
+ @param timeDiff The time difference to format]]>
+ </doc>
+ </method>
+ <method name="getFormattedTimeWithDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dateFormat" type="java.text.DateFormat"/>
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+ <![CDATA[Formats time in ms and appends difference (finishTime - startTime)
+ as returned by formatTimeDiff().
+ If the finish time is 0, an empty string is returned; if the start time is 0,
+ then the difference is not appended to the return value.
+ @param dateFormat date format to use
+ @param finishTime finish time
+ @param startTime start time
+ @return formatted value.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an array of strings.
+ @param str the comma separated string values
+ @return the array of the comma separated string values]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a collection of strings.
+ @param str comma separated string values
+ @return an <code>ArrayList</code> of string values]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Split a string using the default separator
+ @param str a string that may contain escaped separators
+ @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="separator" type="char"/>
+ <doc>
+ <![CDATA[Split a string using the given separator
+ @param str a string that may contain escaped separators
+ @param escapeChar a char that can be used to escape the separator
+ @param separator a separator char
+ @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="findNext" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="separator" type="char"/>
+ <param name="escapeChar" type="char"/>
+ <param name="start" type="int"/>
+ <param name="split" type="java.lang.StringBuilder"/>
+ <doc>
+ <![CDATA[Finds the first occurrence of the separator character ignoring the escaped
+ separators starting from the index. Note the substring between the index
+ and the position of the separator is passed.
+ @param str the source string
+ @param separator the character to find
+ @param escapeChar character used to escape
+ @param start from where to search
+ @param split used to pass back the extracted string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Escape commas in the string using the default escape char
+ @param str a string
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Escape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the char to be escaped
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charsToEscape" type="char[]"/>
+ <doc>
+ <![CDATA[@param charsToEscape array of characters to be escaped]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Unescape commas in the string using the default escape char
+ @param str a string
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Unescape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the escaped char
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charsToEscape" type="char[]"/>
+ <doc>
+ <![CDATA[@param charsToEscape array of characters to unescape]]>
+ </doc>
+ </method>
+ <method name="getHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the hostname without throwing an exception.
+ @return hostname]]>
+ </doc>
+ </method>
+ <method name="startupShutdownMessage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class&lt;?&gt;"/>
+ <param name="args" type="java.lang.String[]"/>
+ <param name="LOG" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Print a log message for starting up and shutting down
+ @param clazz the class of the server
+ @param args arguments
+ @param LOG the target log object]]>
+ </doc>
+ </method>
+ <method name="escapeHTML" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Escapes HTML special characters present in the string.
+ @param string
+ @return HTML-escaped string representation]]>
+ </doc>
+ </method>
+ <field name="COMMA" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ESCAPE_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[General string utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.StringUtils -->
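+ <!-- Editor's sketch (not part of the generated description): escaping and
+ splitting on the default comma separator, assuming the default escape char is
+ a backslash and that split leaves escape characters in place, so
+ unEscapeString recovers the original value.
+
+ String joined = StringUtils.escapeString("a,b") + "," + "c"; // "a\,b,c"
+ String[] parts = StringUtils.split(joined);                  // ["a\,b", "c"]
+ String first = StringUtils.unEscapeString(parts[0]);         // "a,b"
+ -->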
+ <!-- start class org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix -->
+ <class name="StringUtils.TraditionalBinaryPrefix" extends="java.lang.Enum&lt;org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="symbol" type="char"/>
+ <doc>
+ <![CDATA[@return The TraditionalBinaryPrefix object corresponding to the symbol.]]>
+ </doc>
+ </method>
+ <method name="string2long" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convert a string to long.
+ The input string is first trimmed
+ and then parsed with a traditional binary prefix.
+
+ For example,
+ "-1230k" will be converted to -1230 * 1024 = -1259520;
+ "891g" will be converted to 891 * 1024^3 = 956703965184;
+
+ @param s input string
+ @return a long value represented by the input string.]]>
+ </doc>
+ </method>
+ <field name="value" type="long"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="symbol" type="char"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The traditional binary prefixes, kilo, mega, ..., exa,
+ which can be represented by a 64-bit integer.
+ TraditionalBinaryPrefix symbols are case insensitive.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix -->
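+ <!-- Editor's sketch (not part of the generated description): the worked
+ conversions from the string2long doc above.
+
+ long k = StringUtils.TraditionalBinaryPrefix.string2long("-1230k"); // -1259520
+ long g = StringUtils.TraditionalBinaryPrefix.string2long("891g");   // 956703965184
+ -->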
+ <!-- start interface org.apache.hadoop.util.Tool -->
+ <interface name="Tool" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Execute the command with the given arguments.
+
+ @param args command specific arguments.
+ @return exit code.
+ @throws Exception]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A tool interface that supports handling of generic command-line options.
+
+ <p><code>Tool</code> is the standard for any Map-Reduce tool/application.
+ The tool/application should delegate the handling of
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ standard command-line options</a> to {@link ToolRunner#run(Tool, String[])}
+ and only handle its custom arguments.</p>
+
+ <p>Here is how a typical <code>Tool</code> is implemented:</p>
+ <p><blockquote><pre>
+ public class MyApp extends Configured implements Tool {
+
+ public int run(String[] args) throws Exception {
+ // <code>Configuration</code> processed by <code>ToolRunner</code>
+ Configuration conf = getConf();
+
+ // Create a JobConf using the processed <code>conf</code>
+ JobConf job = new JobConf(conf, MyApp.class);
+
+ // Process custom command-line options
+ Path in = new Path(args[1]);
+ Path out = new Path(args[2]);
+
+ // Specify various job-specific parameters
+ job.setJobName("my-app");
+ job.setInputPath(in);
+ job.setOutputPath(out);
+ job.setMapperClass(MyApp.MyMapper.class);
+ job.setReducerClass(MyApp.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ return 0;
+ }
+
+
+ public static void main(String[] args) throws Exception {
+ // Let <code>ToolRunner</code> handle generic command-line options
+ int res = ToolRunner.run(new Configuration(), new MyApp(), args);
+
+ System.exit(res);
+ }
+ }
+ </pre></blockquote></p>
+
+ @see GenericOptionsParser
+ @see ToolRunner]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Tool -->
+ <!-- start class org.apache.hadoop.util.ToolRunner -->
+ <class name="ToolRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ToolRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the given <code>Tool</code> by {@link Tool#run(String[])}, after
+ parsing with the given generic arguments. Uses the given
+ <code>Configuration</code>, or builds one if null.
+
+ Sets the <code>Tool</code>'s configuration with the possibly modified
+ version of the <code>conf</code>.
+
+ @param conf <code>Configuration</code> for the <code>Tool</code>.
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the <code>Tool</code> with its <code>Configuration</code>.
+
+ Equivalent to <code>run(tool.getConf(), tool, args)</code>.
+
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Prints generic command-line arguments and usage information.
+
+ @param out stream to write usage information to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility to help run {@link Tool}s.
+
+ <p><code>ToolRunner</code> can be used to run classes implementing
+ <code>Tool</code> interface. It works in conjunction with
+ {@link GenericOptionsParser} to parse the
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ generic hadoop command line arguments</a> and modifies the
+ <code>Configuration</code> of the <code>Tool</code>. The
+ application-specific options are passed along without being modified.
+ </p>
+
+ @see Tool
+ @see GenericOptionsParser]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ToolRunner -->
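+ <!-- Illustrative sketch only; MyTool and "my.custom.key" are hypothetical.
+      It shows the launch pattern the class description assumes: generic
+      options such as "-D my.custom.key=value" are parsed and applied to the
+      Configuration before run(String[]) receives the remaining arguments.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.conf.Configured;
+      import org.apache.hadoop.util.Tool;
+      import org.apache.hadoop.util.ToolRunner;
+
+      public class MyTool extends Configured implements Tool {
+        public int run(String[] args) throws Exception {
+          // getConf() already reflects any parsed generic options.
+          System.out.println(getConf().get("my.custom.key", "unset"));
+          return 0;
+        }
+        public static void main(String[] args) throws Exception {
+          System.exit(ToolRunner.run(new Configuration(), new MyTool(), args));
+        }
+      }
+ -->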
+ <!-- start class org.apache.hadoop.util.UTF8ByteArrayUtils -->
+ <class name="UTF8ByteArrayUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UTF8ByteArrayUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="findByte" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <param name="b" type="byte"/>
+ <doc>
+ <![CDATA[Find the first occurrence of the given byte b in a UTF-8 encoded string.
+ @param utf a byte array containing a UTF-8 encoded string
+ @param start starting offset
+ @param end ending position
+ @param b the byte to find
+ @return position of the first occurrence of the byte, or -1 if not found]]>
+ </doc>
+ </method>
+ <method name="findBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <param name="b" type="byte[]"/>
+ <doc>
+ <![CDATA[Find the first occurrence of the given byte sequence b in a UTF-8 encoded string.
+ @param utf a byte array containing a UTF-8 encoded string
+ @param start starting offset
+ @param end ending position
+ @param b the bytes to find
+ @return position of the first occurrence of the byte sequence, or -1 if not found]]>
+ </doc>
+ </method>
+ <method name="findNthByte" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="b" type="byte"/>
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Find the nth occurrence of the given byte b in a UTF-8 encoded string.
+ @param utf a byte array containing a UTF-8 encoded string
+ @param start starting offset
+ @param length the length of the byte array
+ @param b the byte to find
+ @param n the desired occurrence of the given byte
+ @return position of the nth occurrence of the given byte, or -1 if it does not exist]]>
+ </doc>
+ </method>
+ <method name="findNthByte" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="b" type="byte"/>
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Find the nth occurrence of the given byte b in a UTF-8 encoded string.
+ @param utf a byte array containing a UTF-8 encoded string
+ @param b the byte to find
+ @param n the desired occurrence of the given byte
+ @return position of the nth occurrence of the given byte, or -1 if it does not exist]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.util.UTF8ByteArrayUtils -->
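+ <!-- Illustrative sketch only; the record content is hypothetical. It shows
+      the assumed use of the byte-search helpers above to split a
+      tab-separated record (getBytes("UTF-8") can throw
+      java.io.UnsupportedEncodingException).
+
+      byte[] record = "key1\tkey2\tvalue".getBytes("UTF-8");
+      // Position of the first tab in [0, record.length), or -1 if absent.
+      int first = UTF8ByteArrayUtils.findByte(record, 0, record.length, (byte) '\t');
+      // Position of the second tab anywhere in the array, or -1 if absent.
+      int second = UTF8ByteArrayUtils.findNthByte(record, (byte) '\t', 2);
+ -->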
+ <!-- start class org.apache.hadoop.util.VersionInfo -->
+ <class name="VersionInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Hadoop version.
+ @return the Hadoop version string, e.g. "0.6.3-dev"]]>
+ </doc>
+ </method>
+ <method name="getRevision" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Subversion revision number for the root directory.
+ @return the revision number, e.g. "451451"]]>
+ </doc>
+ </method>
+ <method name="getDate" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The date that Hadoop was compiled.
+ @return the compilation date in Unix date format]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The user that compiled Hadoop.
+ @return the username of the user]]>
+ </doc>
+ </method>
+ <method name="getUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Subversion URL for the root Hadoop directory.]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the buildVersion which includes version,
+ revision, user and date.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[This class finds the package info for Hadoop and the HadoopVersionAnnotation
+ information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.VersionInfo -->
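+ <!-- Illustrative sketch only: printing the build metadata exposed by the
+      static accessors above.
+
+      System.out.println("Hadoop " + VersionInfo.getVersion()
+          + " from revision " + VersionInfo.getRevision()
+          + ", built by " + VersionInfo.getUser()
+          + " on " + VersionInfo.getDate());
+ -->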
+ <!-- start class org.apache.hadoop.util.XMLUtils -->
+ <class name="XMLUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="XMLUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="transform"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="styleSheet" type="java.io.InputStream"/>
+ <param name="xml" type="java.io.InputStream"/>
+ <param name="out" type="java.io.Writer"/>
+ <exception name="TransformerConfigurationException" type="javax.xml.transform.TransformerConfigurationException"/>
+ <exception name="TransformerException" type="javax.xml.transform.TransformerException"/>
+ <doc>
+ <![CDATA[Transform input XML given a stylesheet.
+
+ @param styleSheet the style-sheet
+ @param xml input xml data
+ @param out output
+ @throws TransformerConfigurationException
+ @throws TransformerException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[General XML utilities.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.XMLUtils -->
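+ <!-- Illustrative sketch only; the file names are hypothetical and the
+      streams should be closed by the caller. It shows the assumed use of
+      transform(InputStream, InputStream, Writer) to apply an XSLT
+      stylesheet to an XML document.
+
+      InputStream styleSheet = new FileInputStream("report.xsl");
+      InputStream xml = new FileInputStream("data.xml");
+      Writer out = new FileWriter("report.html");
+      XMLUtils.transform(styleSheet, xml, out);
+      out.close();
+ -->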
+</package>
+
+</api>
diff --git a/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.19.1.xml b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.19.1.xml
new file mode 100644
index 0000000000..92bdd2c799
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.19.1.xml
@@ -0,0 +1,44195 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Fri Feb 20 00:10:24 UTC 2009 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="hadoop 0.19.1"
+ jdversion="1.1.1">
+
+<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/hadoopqa/tools/jdiff/latest/jdiff.jar:/home/hadoopqa/tools/jdiff/latest/xerces.jar -classpath /home/ndaley/hadoop/branch-0.19/build/classes:/home/ndaley/hadoop/branch-0.19/lib/commons-cli-2.0-SNAPSHOT.jar:/home/ndaley/hadoop/branch-0.19/lib/commons-codec-1.3.jar:/home/ndaley/hadoop/branch-0.19/lib/commons-httpclient-3.0.1.jar:/home/ndaley/hadoop/branch-0.19/lib/commons-logging-1.0.4.jar:/home/ndaley/hadoop/branch-0.19/lib/commons-logging-api-1.0.4.jar:/home/ndaley/hadoop/branch-0.19/lib/commons-net-1.4.1.jar:/home/ndaley/hadoop/branch-0.19/lib/hsqldb-1.8.0.10.jar:/home/ndaley/hadoop/branch-0.19/lib/jets3t-0.6.1.jar:/home/ndaley/hadoop/branch-0.19/lib/jetty-5.1.4.jar:/home/ndaley/hadoop/branch-0.19/lib/jetty-ext/commons-el.jar:/home/ndaley/hadoop/branch-0.19/lib/jetty-ext/jasper-compiler.jar:/home/ndaley/hadoop/branch-0.19/lib/jetty-ext/jasper-runtime.jar:/home/ndaley/hadoop/branch-0.19/lib/jetty-ext/jsp-api.jar:/home/ndaley/hadoop/branch-0.19/lib/junit-3.8.1.jar:/home/ndaley/hadoop/branch-0.19/lib/kfs-0.2.0.jar:/home/ndaley/hadoop/branch-0.19/lib/log4j-1.2.15.jar:/home/ndaley/hadoop/branch-0.19/lib/oro-2.0.8.jar:/home/ndaley/hadoop/branch-0.19/lib/servlet-api.jar:/home/ndaley/hadoop/branch-0.19/lib/slf4j-api-1.4.3.jar:/home/ndaley/hadoop/branch-0.19/lib/slf4j-log4j12-1.4.3.jar:/home/ndaley/hadoop/branch-0.19/lib/xmlenc-0.52.jar:/home/ndaley/hadoop/branch-0.19/conf:/home/ndaley/tools/ant/latest/lib/ant-launcher.jar:/home/ndaley/tools/ant/latest/lib/ant-antlr.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-bcel.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-bsf.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-log4j.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-oro.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-regexp.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-resolver.jar:/home/ndaley/tools/ant/latest/lib/ant-commons-logging.jar:/home/ndaley/tools/ant/latest/lib/ant-commons-net.jar:/home/ndaley/tools/ant/latest/lib/ant-jai.jar:/home/ndaley/tools/ant/latest/lib/ant-javamail.jar:/home/ndaley/tools/ant/latest/lib/ant-jdepend.jar:/home/ndaley/tools/ant/latest/lib/ant-jmf.jar:/home/ndaley/tools/ant/latest/lib/ant-jsch.jar:/home/ndaley/tools/ant/latest/lib/ant-junit.jar:/home/ndaley/tools/ant/latest/lib/ant-netrexx.jar:/home/ndaley/tools/ant/latest/lib/ant-nodeps.jar:/home/ndaley/tools/ant/latest/lib/ant-starteam.jar:/home/ndaley/tools/ant/latest/lib/ant-stylebook.jar:/home/ndaley/tools/ant/latest/lib/ant-swing.jar:/home/ndaley/tools/ant/latest/lib/ant-testutil.jar:/home/ndaley/tools/ant/latest/lib/ant-trax.jar:/home/ndaley/tools/ant/latest/lib/ant-weblogic.jar:/home/ndaley/tools/ant/latest/lib/ant.jar:/home/ndaley/tools/ant/latest/lib/xercesImpl.jar:/home/ndaley/tools/ant/latest/lib/xml-apis.jar:/home/hadoopqa/tools/java/jdk1.6.0_07-32bit/lib/tools.jar -sourcepath /home/ndaley/hadoop/branch-0.19/src/core:/home/ndaley/hadoop/branch-0.19/src/mapred:/home/ndaley/hadoop/branch-0.19/src/tools -apidir /home/ndaley/hadoop/branch-0.19/docs/jdiff -apiname hadoop 0.19.1 -->
+<package name="org.apache.hadoop">
+ <!-- start class org.apache.hadoop.HadoopVersionAnnotation -->
+ <class name="HadoopVersionAnnotation" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.annotation.Annotation"/>
+ <doc>
+ <![CDATA[A package attribute that captures the version of Hadoop that was compiled.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.HadoopVersionAnnotation -->
+</package>
+<package name="org.apache.hadoop.conf">
+ <!-- start interface org.apache.hadoop.conf.Configurable -->
+ <interface name="Configurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration to be used by this object.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the configuration used by this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.conf.Configurable -->
+ <!-- start class org.apache.hadoop.conf.Configuration -->
+ <class name="Configuration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"/>
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration where the behavior of reading from the default
+ resources can be turned off.
+
+ If the parameter {@code loadDefaults} is false, the new instance
+ will not load resources from the default files.
+ @param loadDefaults specifies whether to load from the default files]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration with the same settings cloned from another.
+
+ @param other the configuration from which to clone settings.]]>
+ </doc>
+ </constructor>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param name resource to be added, the classpath is examined for a file
+ with that name.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="url" type="java.net.URL"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param url url of the resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param file file-path of resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param in InputStream to deserialize the object from.]]>
+ </doc>
+ </method>
+ <method name="reloadConfiguration"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reload configuration from previously added resources.
+
+ This method clears all configuration read from the added resources
+ and all final parameters. The resources will be read again before
+ the values are next accessed. Values that are added via set methods
+ will overlay values read from the resources.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, or <code>null</code>
+ if no such property exists.
+
+ Values are processed for <a href="#VariableExpansion">variable expansion</a>
+ before being returned.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="getRaw" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, without doing
+ <a href="#VariableExpansion">variable expansion</a>.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the <code>value</code> of the <code>name</code> property.
+
+ @param name property name.
+ @param value property value.]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property. If no such property
+ exists, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value, or <code>defaultValue</code> if the property
+ doesn't exist.]]>
+ </doc>
+ </method>
+ <method name="getInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="int"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>.
+
+ If no such property exists, or if the specified value is not a valid
+ <code>int</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as an <code>int</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>.
+
+ @param name property name.
+ @param value <code>int</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="long"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>long</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>long</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>.
+
+ @param name property name.
+ @param value <code>long</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="float"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>float</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>float</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getBoolean" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="boolean"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>boolean</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>boolean</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setBoolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>.
+
+ @param name property name.
+ @param value <code>boolean</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Parse the given attribute as a set of integer ranges
+ @param name the attribute name
+ @param defaultValue the default value if it is not set
+ @return a new set of ranges from the configured value]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ a collection of <code>String</code>s.
+ If no such property is specified then an empty collection is returned.
+ <p>
+ This is an optimized version of {@link #getStrings(String)}
+
+ @param name property name.
+ @return property value as a collection of <code>String</code>s.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then <code>null</code> is returned.
+
+ @param name property name.
+ @return property value as an array of <code>String</code>s,
+ or <code>null</code>.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then default value is returned.
+
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of <code>String</code>s,
+ or default value.]]>
+ </doc>
+ </method>
+ <method name="setStrings"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="values" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Set the array of string values for the <code>name</code> property
+ as comma-delimited values.
+
+ @param name property name.
+ @param values The values]]>
+ </doc>
+ </method>
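+ <!-- Illustrative sketch only, assuming an existing Configuration conf;
+      the property name is hypothetical. It shows round-tripping a
+      comma-delimited list property through setStrings/getStrings.
+
+      conf.setStrings("my.app.hosts", new String[] {"a.example.com", "b.example.com"});
+      String[] hosts = conf.getStrings("my.app.hosts"); // the two values back
+ -->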
+ <method name="getClassByName" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Load a class by name.
+
+ @param name the class name.
+ @return the class object.
+ @throws ClassNotFoundException if the class is not found.]]>
+ </doc>
+ </method>
+ <method name="getClasses" return="java.lang.Class[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class[]"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property
+ as an array of <code>Class</code>.
+ The value of the property specifies a list of comma separated class names.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the property name.
+ @param defaultValue default value.
+ @return property value as a <code>Class[]</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;? extends U&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends U&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;U&gt;"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>
+ implementing the interface specified by <code>xface</code>.
+
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ An exception is thrown if the returned class does not implement the named
+ interface.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @param xface the interface implemented by the named class.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="xface" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to the name of
+ <code>theClass</code>, which implements the given interface <code>xface</code>.
+
+ An exception is thrown if <code>theClass</code> does not implement the
+ interface <code>xface</code>.
+
+ @param name property name.
+ @param theClass property value.
+ @param xface the interface implemented by the named class.]]>
+ </doc>
+ </method>
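+ <!-- Illustrative sketch only, assuming an existing Configuration conf;
+      Codec, MyCodec and DefaultCodec are hypothetical names. It shows
+      storing and reading back a class-valued property against an interface.
+
+      conf.setClass("my.app.codec", MyCodec.class, Codec.class);
+      Class<? extends Codec> codecClass =
+          conf.getClass("my.app.codec", DefaultCodec.class, Codec.class);
+ -->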
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getResource" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the {@link URL} for the named resource.
+
+ @param name resource name.
+ @return the url for the named resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get an input stream attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return an input stream attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsReader" return="java.io.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a {@link Reader} attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return a reader attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of keys in the configuration.
+
+ @return number of keys in the configuration.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clears all keys from the configuration.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code>
+ key-value pairs in the configuration.
+
+ @return an iterator over the entries.]]>
+ </doc>
+ </method>
+ <method name="writeXml"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write out the non-default properties in this configuration to the given
+ {@link OutputStream}.
+
+ @param out the output stream to write to.]]>
+ </doc>
+ </method>
+ <method name="getClassLoader" return="java.lang.ClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link ClassLoader} for this job.
+
+ @return the correct class loader.]]>
+ </doc>
+ </method>
+ <method name="setClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="classLoader" type="java.lang.ClassLoader"/>
+ <doc>
+ <![CDATA[Set the class loader that will be used to load the various objects.
+
+ @param classLoader the new class loader.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setQuietMode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="quietmode" type="boolean"/>
+ <doc>
+ <![CDATA[Set the quiet-mode.
+
+ In quiet-mode, error and informational messages might not be logged.
+
+ @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
+ to turn it off.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[For debugging. List non-default properties to the terminal and exit.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Provides access to configuration parameters.
+
+ <h4 id="Resources">Resources</h4>
+
+ <p>Configurations are specified by resources. A resource contains a set of
+ name/value pairs as XML data. Each resource is named by either a
+ <code>String</code> or by a {@link Path}. If named by a <code>String</code>,
+ then the classpath is examined for a file with that name. If named by a
+ <code>Path</code>, then the local filesystem is examined directly, without
+ referring to the classpath.
+
+ <p>Unless explicitly turned off, Hadoop by default specifies two
+ resources, loaded in-order from the classpath: <ol>
+ <li><tt><a href="{@docRoot}/../hadoop-default.html">hadoop-default.xml</a>
+ </tt>: Read-only defaults for hadoop.</li>
+ <li><tt>hadoop-site.xml</tt>: Site-specific configuration for a given hadoop
+ installation.</li>
+ </ol>
+ Applications may add additional resources, which are loaded
+ subsequent to these resources in the order they are added.
+
+ <h4 id="FinalParams">Final Parameters</h4>
+
+ <p>Configuration parameters may be declared <i>final</i>.
+ Once a resource declares a value final, no subsequently-loaded
+ resource can alter that value.
+ For example, one might define a final parameter with:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;dfs.client.buffer.dir&lt;/name&gt;
+ &lt;value&gt;/tmp/hadoop/dfs/client&lt;/value&gt;
+ <b>&lt;final&gt;true&lt;/final&gt;</b>
+ &lt;/property&gt;</pre></tt>
+
+ Administrators typically define parameters as final in
+ <tt>hadoop-site.xml</tt> for values that user applications may not alter.
+
+ <h4 id="VariableExpansion">Variable Expansion</h4>
+
+ <p>Value strings are first processed for <i>variable expansion</i>. The
+ available properties are:<ol>
+ <li>Other properties defined in this Configuration; and, if a name is
+ undefined here,</li>
+ <li>Properties in {@link System#getProperties()}.</li>
+ </ol>
+
+ <p>For example, if a configuration resource contains the following property
+ definitions:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;basedir&lt;/name&gt;
+ &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
+ &lt;/property&gt;
+
+ &lt;property&gt;
+ &lt;name&gt;tempdir&lt;/name&gt;
+ &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt;
+ &lt;/property&gt;</pre></tt>
+
+ When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
+ will be resolved to another property in this Configuration, while
+ <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
+ of the System property with that name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration -->
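+ <!-- Illustrative sketch only; the resource and property names are
+      hypothetical. It exercises the resource loading, typed getters and
+      variable expansion described in the class documentation above.
+
+      Configuration conf = new Configuration(); // hadoop-default.xml, hadoop-site.xml
+      conf.addResource("my-app.xml");           // found on the classpath
+      int retries = conf.getInt("my.app.retries", 3); // 3 if unset or invalid
+      conf.set("basedir", "/user/${user.name}");
+      // ${user.name} is resolved from the System property of the same name.
+      String base = conf.get("basedir");
+ -->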
+ <!-- start class org.apache.hadoop.conf.Configuration.IntegerRanges -->
+ <class name="Configuration.IntegerRanges" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Configuration.IntegerRanges"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Configuration.IntegerRanges" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isIncluded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Is the given value in the set of ranges?
+ @param value the value to check
+ @return is the value in the ranges?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A class that represents a set of positive integer ranges. It parses
+ strings of the form: "2-3,5,7-" where ranges are separated by comma and
+ the lower/upper bounds are separated by dash. Either the lower or the
+ upper bound may be omitted, meaning all values up to or above the given
+ bound. So the string above means 2, 3, 5, and 7, 8, 9, ...]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration.IntegerRanges -->
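+ <!-- Illustrative sketch only: parsing the example range string from the
+      class description above and testing membership.
+
+      Configuration.IntegerRanges ranges = new Configuration.IntegerRanges("2-3,5,7-");
+      System.out.println(ranges.isIncluded(5));   // true
+      System.out.println(ranges.isIncluded(6));   // false
+      System.out.println(ranges.isIncluded(100)); // true: upper bound omitted
+ -->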
+ <!-- start class org.apache.hadoop.conf.Configured -->
+ <class name="Configured" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Configured"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configured" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configured -->
+</package>
+<package name="org.apache.hadoop.filecache">
+ <!-- start class org.apache.hadoop.filecache.DistributedCache -->
+ <class name="DistributedCache" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedCache"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it is either previously
+ cached (and valid) or it is copied from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache directory where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred is
+ returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="honorSymLinkConf" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it is either previously
+ cached (and valid) or it is copied from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache directory where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred is
+ returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @param honorSymLinkConf if this is false, then the symlinks are not
+ created even if conf says so (this is required for an optimization in task
+ launches)
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it is either previously
+ cached (and valid) or it is copied from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache directory where you want to localize the files/archives
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred
+ is returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="releaseCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is the opposite of getLocalCache. When you are done
+ using the cache, you need to release it.
+ @param cache The cache URI to be released
+ @param conf configuration which contains the filesystem the cache
+ is contained in.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeRelative" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTimestamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="cache" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns mtime of a given cache file on hdfs.
+ @param conf configuration
+ @param cache cache file
+ @return mtime of a given cache file on hdfs
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createAllSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="jobCacheDir" type="java.io.File"/>
+ <param name="workDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method creates symlinks in one directory for all files in a given directory
+ @param conf the configuration
+ @param jobCacheDir the target directory for creating symlinks
+ @param workDir the directory in which the symlinks are created
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setCacheArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archives" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of archives
+ @param archives The list of archives that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="setCacheFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of files
+ @param files The list of files that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="getCacheArchives" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache archives set in the Configuration
+ @param conf The configuration which contains the archives
+ @return A URI array of the caches set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCacheFiles" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache files set in the Configuration
+ @param conf The configuration which contains the files
+ @return A URI array of the files set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheArchives" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized caches
+ @param conf Configuration that contains the localized archives
+ @return A path array of localized caches
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized files
+ @param conf Configuration that contains the localized files
+ @return A path array of localized files
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getArchiveTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the archives
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFileTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the files
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setArchiveTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the timestamps of the archives to be localized, used to
+ check that they have not changed since the job started.
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of archives.
+ The order should be the same as the order in which the archives are added.]]>
+ </doc>
+ </method>
+ <method name="setFileTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the timestamps of the files to be localized, used to
+ check that they have not changed since the job started.
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of files.
+ The order should be the same as the order in which the files are added.]]>
+ </doc>
+ </method>
+ <method name="setLocalArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized archives
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+ </doc>
+ </method>
+ <method name="setLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized files
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+ </doc>
+ </method>
+ <method name="addCacheArchive"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add an archive to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addCacheFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add a file to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addFileToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a file path to the current set of classpath entries. It adds the
+ file to the cache as well.
+
+ @param file Path of the file to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getFileClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the file entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="addArchiveToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archive" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an archive path to the current set of classpath entries. It adds the
+ archive to the cache as well.
+
+ @param archive Path of the archive to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getArchiveClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the archive entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method allows you to create symlinks in the current working directory
+ of the task to all the cache files/archives
+ @param conf the jobconf]]>
+ </doc>
+ </method>
+ <method name="getSymlink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method checks to see if symlinks are to be created for the
+ localized cache files in the current working directory
+ @param conf the jobconf
+ @return true if symlinks are to be created; false otherwise]]>
+ </doc>
+ </method>
+ <method name="checkURIs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uriFiles" type="java.net.URI[]"/>
+ <param name="uriArchives" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[This method checks if there is a conflict in the fragment names
+ of the uris. It also makes sure that each uri has a fragment. It
+ should only be called if you want to create symlinks for
+ the various archives and files.
+ @param uriFiles the array of file uris
+ @param uriArchives the array of archive uris]]>
+ </doc>
+ </method>
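+ <!-- A minimal usage sketch for the symlink helpers above, assuming a JobConf
+      named "job" and URIs that each carry a fragment (which checkURIs requires);
+      the paths are illustrative only.
+
+        import java.net.URI;
+        import org.apache.hadoop.filecache.DistributedCache;
+        import org.apache.hadoop.mapred.JobConf;
+
+        JobConf job = new JobConf();
+        URI[] files = { URI.create("/myapp/lookup.dat#lookup.dat") };
+        URI[] archives = { URI.create("/myapp/map.zip#map") };
+        if (DistributedCache.checkURIs(files, archives)) {
+          DistributedCache.addCacheFile(files[0], job);
+          DistributedCache.addCacheArchive(archives[0], job);
+          DistributedCache.createSymlink(job); // tasks then see ./lookup.dat and ./map
+        }
+ -->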
+ <method name="purgeCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clear the entire contents of the cache and delete the backing files. This
+ should only be used when the server is reinitializing, because the users
+ are going to lose their files.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Distribute application-specific large, read-only files efficiently.
+
+ <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ framework to cache files (text, archives, jars etc.) needed by applications.
+ </p>
+
+ <p>Applications specify the files, via urls (hdfs:// or http://) to be cached
+ via the {@link org.apache.hadoop.mapred.JobConf}.
+ The <code>DistributedCache</code> assumes that the
+ files specified via hdfs:// urls are already present on the
+ {@link FileSystem} at the path specified by the url.</p>
+
+ <p>The framework will copy the necessary files on to the slave node before
+ any tasks for the job are executed on that node. Its efficiency stems from
+ the fact that the files are only copied once per job and the ability to
+ cache archives which are un-archived on the slaves.</p>
+
+ <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ data/text files and/or more complex types such as archives, jars etc.
+ Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes.
+ Jars may be optionally added to the classpath of the tasks, a rudimentary
+ software distribution mechanism. Files have execution permissions.
+ Optionally users can also direct it to symlink the distributed cache file(s)
+ into the working directory of the task.</p>
+
+ <p><code>DistributedCache</code> tracks modification timestamps of the cache
+ files. Clearly the cache files should not be modified by the application
+ or externally while the job is executing.</p>
+
+ <p>Here is an illustrative example on how to use the
+ <code>DistributedCache</code>:</p>
+ <p><blockquote><pre>
+ // Setting up the cache for the application
+
+ 1. Copy the requisite files to the <code>FileSystem</code>:
+
+ $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
+ $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
+ $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+ $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
+ $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
+ $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
+
+ 2. Setup the application's <code>JobConf</code>:
+
+ JobConf job = new JobConf();
+ DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
+ job);
+ DistributedCache.addCacheArchive(new URI("/myapp/map.zip", job);
+ DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar", job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz", job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz", job);
+
+ 3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
+ or {@link org.apache.hadoop.mapred.Reducer}:
+
+ public static class MapClass extends MapReduceBase
+ implements Mapper&lt;K, V, K, V&gt; {
+
+ private Path[] localArchives;
+ private Path[] localFiles;
+
+ public void configure(JobConf job) {
+ // Get the cached archives/files
+ localArchives = DistributedCache.getLocalCacheArchives(job);
+ localFiles = DistributedCache.getLocalCacheFiles(job);
+ }
+
+ public void map(K key, V value,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Use data from the cached archives/files here
+ // ...
+ // ...
+ output.collect(k, v);
+ }
+ }
+
+ </pre></blockquote></p>
+
+ @see org.apache.hadoop.mapred.JobConf
+ @see org.apache.hadoop.mapred.JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.filecache.DistributedCache -->
+</package>
+<package name="org.apache.hadoop.fs">
+ <!-- start class org.apache.hadoop.fs.BlockLocation -->
+ <class name="BlockLocation" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlockLocation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with host, name, offset and length]]>
+ </doc>
+ </constructor>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hosts (hostname) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of names (hostname:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the length of the block]]>
+ </doc>
+ </method>
+ <method name="setOffset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <doc>
+ <![CDATA[Set the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="setLength"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="length" type="long"/>
+ <doc>
+ <![CDATA[Set the length of block]]>
+ </doc>
+ </method>
+ <method name="setHosts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hosts" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the hosts hosting this block]]>
+ </doc>
+ </method>
+ <method name="setNames"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the names (host:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement write of Writable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement readFields of Writable]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BlockLocation -->
+ <!-- start class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <class name="BufferedFSInputStream" extends="java.io.BufferedInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="BufferedFSInputStream" type="org.apache.hadoop.fs.FSInputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a <code>BufferedFSInputStream</code>
+ with the specified buffer size,
+ and saves its argument, the input stream
+ <code>in</code>, for later use. An internal
+ buffer array of length <code>size</code>
+ is created and stored in <code>buf</code>.
+
+ @param in the underlying input stream.
+ @param size the buffer size.
+ @exception IllegalArgumentException if size <= 0.]]>
+ </doc>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A class that optimizes reading from FSInputStream by buffering]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <!-- start class org.apache.hadoop.fs.ChecksumException -->
+ <class name="ChecksumException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumException" type="java.lang.String, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Thrown for checksum errors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumException -->
+ <!-- start class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <class name="ChecksumFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getApproxChkSumLength" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getRawFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the raw file system]]>
+ </doc>
+ </method>
+ <method name="getChecksumFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return the name of the checksum file associated with a file.]]>
+ </doc>
+ </method>
+ <method name="isChecksumFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return true iff file is a checksum file name.]]>
+ </doc>
+ </method>
+ <method name="getChecksumFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileSize" type="long"/>
+ <doc>
+ <![CDATA[Return the length of the checksum file given the size of the
+ actual file.]]>
+ </doc>
+ </method>
+ <method name="getBytesPerSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes per checksum]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getChecksumLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ <param name="bytesPerSum" type="int"/>
+ <doc>
+ <![CDATA[Calculates the length of the checksum file in bytes.
+ @param size the length of the data file in bytes
+ @param bytesPerSum the number of bytes in a checksum block
+ @return the number of bytes in the checksum file]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename files/dirs]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement the delete(Path, boolean) in checksum
+ file system.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
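+ <!-- A short sketch of iterating over listStatus results, assuming an already
+      initialized FileSystem "fs" and an illustrative path; getPath and getLen
+      come from FileStatus, described later in this package.
+
+        import org.apache.hadoop.fs.FileStatus;
+        import org.apache.hadoop.fs.Path;
+
+        FileStatus[] entries = fs.listStatus(new Path("/user/demo"));
+        for (FileStatus s : entries) {
+          System.out.println(s.getPath() + " " + s.getLen() + " bytes");
+        }
+ -->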
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="copyCrc" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ If src and dst are directories, the copyCrc parameter
+ determines whether to copy CRC files.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Report a checksum error to the file system.
+ @param f the file name containing the error
+ @param in the stream open on the file
+ @param inPos the position of the beginning of the bad data in the file
+ @param sums the stream open on the checksum file
+ @param sumsPos the position of the beginning of the bad data in the checksum file
+ @return true if a retry is necessary]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract checksummed FileSystem.
+ It provides a basic implementation of a checksummed FileSystem,
+ which creates a checksum file for each raw file.
+ It generates & verifies checksums at the client side.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <!-- start class org.apache.hadoop.fs.ContentSummary -->
+ <class name="ContentSummary" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ContentSummary"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long, long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the length]]>
+ </doc>
+ </method>
+ <method name="getDirectoryCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the directory count]]>
+ </doc>
+ </method>
+ <method name="getFileCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the file count]]>
+ </doc>
+ </method>
+ <method name="getQuota" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the directory quota]]>
+ </doc>
+ </method>
+ <method name="getSpaceConsumed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns (disk) space consumed]]>
+ </doc>
+ </method>
+ <method name="getSpaceQuota" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns (disk) space quota]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the header of the output.
+ if qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the header of the output]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the string representation of the object in the output format.
+ if qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the string representation of the object]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store the content summary of a directory or a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ContentSummary -->
+ <!-- start class org.apache.hadoop.fs.DF -->
+ <class name="DF" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DF" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="DF" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFilesystem" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAvailable" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPercentUsed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMount" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DF_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'df' program.
+ Tested on Linux, FreeBSD, Cygwin.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DF -->
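+ <!-- A hedged sketch of querying disk usage through DF, inside a method that
+      may throw IOException; the mount point "/" and the Configuration "conf"
+      are assumptions, not part of the API description.
+
+        import java.io.File;
+        import org.apache.hadoop.fs.DF;
+
+        DF df = new DF(new File("/"), conf);
+        System.out.println(df.getMount() + ": " + df.getAvailable() + " of "
+            + df.getCapacity() + " bytes free, " + df.getPercentUsed() + "% used");
+ -->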
+ <!-- start class org.apache.hadoop.fs.DU -->
+ <class name="DU" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DU" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param interval refresh the disk usage at this interval
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <constructor name="DU" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param conf configuration object
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <method name="decDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Decrease how much disk space we use.
+ @param value decrease by this value]]>
+ </doc>
+ </method>
+ <method name="incDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Increase how much disk space we use.
+ @param value increase by this value]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return disk space used
+ @throws IOException if the shell command fails]]>
+ </doc>
+ </method>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the path whose disk usage we are tracking]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Start the disk usage checking thread.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down the refreshing thread.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'du' program.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DU -->
+ <!-- start class org.apache.hadoop.fs.FileChecksum -->
+ <class name="FileChecksum" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FileChecksum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAlgorithmName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The checksum algorithm name]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The length of the checksum in bytes]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value of the checksum in bytes]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true if both the algorithms and the values are the same.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An abstract class representing file checksums for files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileChecksum -->
+ <!-- start class org.apache.hadoop.fs.FileStatus -->
+ <class name="FileStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <constructor name="FileStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a directory?
+ @return true if this is a directory]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the block size of the file.
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replication factor of a file.
+ @return the replication factor of a file.]]>
+ </doc>
+ </method>
+ <method name="getModificationTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the modification time of the file.
+ @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getAccessTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the access time of the file.
+ @return the access time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get FsPermission associated with the file.
+ @return permission. If a filesystem does not have a notion of permissions
+ or if permissions could not be determined, then default
+ permissions equivalent of "rwxrwxrwx" is returned.]]>
+ </doc>
+ </method>
+ <method name="getOwner" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the owner of the file.
+ @return owner of the file. The string could be empty if there is no
+ notion of owner of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getGroup" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the group associated with the file.
+ @return group for the file. The string could be empty if there is no
+ notion of group of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Sets permission.
+ @param permission if permission is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="owner" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets owner.
+ @param owner if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setGroup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets group.
+ @param group if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare this object to another object
+
+ @param o the object to be compared.
+ @return a negative integer, zero, or a positive integer as this object
+ is less than, equal to, or greater than the specified object.
+
+ @throws ClassCastException if the specified object is not of
+ type FileStatus]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare if this object is equal to another object
+ @param o the object to be compared.
+ @return true if the two file statuses have the same path name; false if not.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for the object, which is defined as
+ the hash code of the path name.
+
+ @return a hash code value for the path name.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents the client side information for a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileStatus -->
+ <!-- start class org.apache.hadoop.fs.FileSystem -->
+ <class name="FileSystem" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="FileSystem"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseArgs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Consider using {@link GenericOptionsParser} instead.">
+ <param name="argv" type="java.lang.String[]"/>
+ <param name="i" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the cmd-line args, starting at i. Remove consumed args
+ from array. We expect param in the form:
+ '-local | -dfs <namenode:port>'
+ @deprecated Consider using {@link GenericOptionsParser} instead.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the configured filesystem implementation.]]>
+ </doc>
+ </method>
+ <method name="getDefaultUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default filesystem URI from a configuration.
+ @param conf the configuration to access
+ @return the uri of the default filesystem]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.net.URI"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="getNamed" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="call #get(URI,Configuration) instead.">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated call #get(URI,Configuration) instead.]]>
+ </doc>
+ </method>
+ <method name="getLocal" return="org.apache.hadoop.fs.LocalFileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the local file system
+ @param conf the configuration to configure the file system with
+ @return a LocalFileSystem]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the FileSystem for this URI's scheme and authority. The scheme
+ of the URI determines a configuration property name,
+ <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
+ The entire URI is passed to the FileSystem instance's initialize method.]]>
+ </doc>
+ </method>
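+ <!-- A small sketch of the scheme-based lookup described above, assuming the
+      configuration maps the hdfs scheme to a FileSystem implementation; the
+      namenode host and port are illustrative.
+
+        import java.net.URI;
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.FileSystem;
+
+        Configuration conf = new Configuration();
+        FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
+ -->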
+ <method name="closeAll"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all cached filesystems. Be sure those filesystems are not
+ used anymore.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a file with the provided permission.
+ The permission of the file is set to be the provided permission as in
+ setPermission, not permission&~umask
+
+ It is implemented using two RPCs. It is understood that it is inefficient,
+ but the implementation is thread-safe. The other option is to change the
+ value of umask in configuration to be 0, but it is not thread-safe.
+
+ @param fs file system handle
+ @param file the name of the file to be created
+ @param permission the permission of the file
+ @return an output stream
+ @throws IOException]]>
+ </doc>
+ </method>
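+ <!-- A minimal sketch of the static create variant with an explicit permission,
+ assuming fs is a FileSystem obtained as in the get() sketch above (the path
+ and mode are illustrative):
+
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.permission.FsPermission;
+
+ FSDataOutputStream out =
+     FileSystem.create(fs, new Path("/tmp/report.txt"), new FsPermission((short) 0644));
+ out.writeBytes("hello\n");
+ out.close();
+ -->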
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a directory with the provided permission.
+ The permission of the directory is set to the provided permission exactly, as in
+ setPermission, not permission&~umask.
+
+ @see #create(FileSystem, Path, FsPermission)
+
+ @param fs file system handle
+ @param dir the name of the directory to be created
+ @param permission the permission of the directory
+ @return true if the directory creation succeeds; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing the hostnames, offsets and sizes of
+ portions of the given file. For a nonexistent
+ file or region, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The base FileSystem implementation will simply return an element containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file to open]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param permission
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize
+ @param progress
+ @throws IOException
+ @see #setPermission(Path, FsPermission)]]>
+ </doc>
+ </method>
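+ <!-- A sketch of the fully-specified create variant, assuming an open FileSystem
+ fs; the buffer size, replication and block size are illustrative values, not
+ recommendations:
+
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.permission.FsPermission;
+
+ FSDataOutputStream out = fs.create(
+     new Path("/data/part-00000"),
+     new FsPermission((short) 0644),
+     true,               // overwrite an existing file
+     4096,               // I/O buffer size
+     (short) 3,          // block replication
+     64L * 1024 * 1024,  // block size in bytes
+     null);              // no progress reporting
+ -->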
+ <method name="createNewFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the given Path as a brand-new zero-length file. Returns
+ false if the create fails or if the file already exists.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, getConf().getInt("io.file.buffer.size", 4096), null)
+ @param f the existing file to be appended.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, bufferSize, null).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @param progress for reporting progress if it is not null.
+ @throws IOException]]>
+ </doc>
+ </method>
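+ <!-- A sketch of appending with a Progressable callback, assuming an open
+ FileSystem fs. Append is an optional operation, so not every implementation
+ supports it; the path is hypothetical:
+
+ import org.apache.hadoop.fs.FSDataOutputStream;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.util.Progressable;
+
+ FSDataOutputStream out = fs.append(new Path("/logs/app.log"), 4096,
+     new Progressable() {
+       public void progress() { /* invoked periodically while writing */ }
+     });
+ out.writeBytes("one more line\n");
+ out.close();
+ -->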
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get replication.
+
+ @deprecated Use getFileStatus() instead
+ @param src file name
+ @return file replication
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file.
+
+ @param f the path to delete.
+ @param recursive if the path is a directory and recursive is set to
+ true, the directory is deleted; otherwise an exception is thrown. For
+ a file, recursive may be either true or false.
+ @return true if the delete succeeds; false otherwise.
+ @throws IOException]]>
+ </doc>
+ </method>
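+ <!-- A sketch of the recursive delete variant, assuming an open FileSystem fs
+ (paths are hypothetical):
+
+ import org.apache.hadoop.fs.Path;
+
+ boolean removedTree = fs.delete(new Path("/tmp/scratch"), true); // whole directory tree
+ boolean removedFile = fs.delete(new Path("/tmp/one.txt"), false); // single file
+ -->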
+ <method name="deleteOnExit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a path to be deleted when the FileSystem is closed.
+ When the JVM shuts down, all FileSystem objects are closed automatically,
+ and the marked path is then deleted as a result of closing the FileSystem.
+
+ The path has to exist in the file system.
+
+ @param f the path to delete.
+ @return true if deleteOnExit is successful, otherwise false.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="processDeleteOnExit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete all files that were marked as delete-on-exit. This recursively
+ deletes all files in the specified paths.]]>
+ </doc>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check whether the given path exists.
+ @param f the path to check]]>
+ </doc>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[True iff the named path is a regular file.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the {@link ContentSummary} of a given {@link Path}.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given path using the user-supplied path
+ filter.
+
+ @param f
+ a path name
+ @param filter
+ the user-supplied path filter
+ @return an array of FileStatus objects for the files under the given path
+ after applying the filter
+ @throws IOException
+ if any problem is encountered while fetching the status]]>
+ </doc>
+ </method>
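+ <!-- A sketch of listing a directory with a user-supplied PathFilter, assuming
+ an open FileSystem fs; the "part-" prefix check is illustrative:
+
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.PathFilter;
+
+ FileStatus[] parts = fs.listStatus(new Path("/data"), new PathFilter() {
+   public boolean accept(Path p) {
+     return p.getName().startsWith("part-");
+   }
+ });
+ for (FileStatus st : parts) {
+   System.out.println(st.getPath() + " " + st.getLen());
+ }
+ -->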
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using default
+ path filter.
+
+ @param files
+ a list of paths
+ @return a list of statuses for the files under the given paths after
+ applying the default path filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using user-supplied
+ path filter.
+
+ @param files
+ a list of paths
+ @param filter
+ the user-supplied path filter
+ @return a list of statuses for the files under the given paths after
+ applying the filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Return all the files that match pathPattern and are not checksum
+ files. Results are sorted by their names.
+
+ <p>
+ A filename pattern is composed of <i>regular</i> characters and
+ <i>special pattern matching</i> characters, which are:
+
+ <dl>
+ <dd>
+ <dl>
+ <p>
+ <dt> <tt> ? </tt>
+ <dd> Matches any single character.
+
+ <p>
+ <dt> <tt> * </tt>
+ <dd> Matches zero or more characters.
+
+ <p>
+ <dt> <tt> [<i>abc</i>] </tt>
+ <dd> Matches a single character from character set
+ <tt>{<i>a,b,c</i>}</tt>.
+
+ <p>
+ <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
+ <dd> Matches a single character from the character range
+ <tt>{<i>a...b</i>}</tt>. Note that character <tt><i>a</i></tt> must be
+ lexicographically less than or equal to character <tt><i>b</i></tt>.
+
+ <p>
+ <dt> <tt> [^<i>a</i>] </tt>
+ <dd> Matches a single character that is not from character set or range
+ <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur
+ immediately to the right of the opening bracket.
+
+ <p>
+ <dt> <tt> \<i>c</i> </tt>
+ <dd> Removes (escapes) any special meaning of character <i>c</i>.
+
+ <p>
+ <dt> <tt> {ab,cd} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt>
+
+ <p>
+ <dt> <tt> {ab,c{de,fh}} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt>
+
+ </dl>
+ </dd>
+ </dl>
+
+ @param pathPattern a regular expression specifying a path pattern
+
+ @return an array of paths that match the path pattern
+ @throws IOException]]>
+ </doc>
+ </method>
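+ <!-- A sketch of glob matching using the pattern language described above,
+ assuming an open FileSystem fs; the pattern and directory layout are
+ hypothetical:
+
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.Path;
+
+ // Matches e.g. /out/job-2009-01/part-00000, /out/job-2009-02/part-00001, ...
+ FileStatus[] matches = fs.globStatus(new Path("/out/job-2009-*/part-*"));
+ -->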
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array of FileStatus objects whose path names match pathPattern
+ and are accepted by the user-supplied path filter. Results are sorted by
+ their path names.
+ Return null if pathPattern has no glob and the path does not exist.
+ Return an empty array if pathPattern has a glob and no path matches it.
+
+ @param pathPattern
+ a regular expression specifying the path pattern
+ @param filter
+ a user-supplied path filter
+ @return an array of FileStatus objects
+ @throws IOException if any I/O error occurs when fetching file status]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the current user's home directory in this filesystem.
+ The default implementation returns "/user/$USER/".]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param new_dir the new working directory]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system.
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #mkdirs(Path, FsPermission)} with default permission.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make the given file and all non-existent parents into
+ directories. Has the semantics of Unix 'mkdir -p'.
+ Existence of the directory hierarchy is not an error.]]>
+ </doc>
+ </method>
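+ <!-- A sketch of mkdirs with an explicit permission, assuming an open
+ FileSystem fs; like Unix 'mkdir -p', pre-existing parents are not an error
+ (the mode is illustrative):
+
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.permission.FsPermission;
+
+ boolean ok = fs.mkdirs(new Path("/user/alice/output"),
+                        new FsPermission((short) 0755));
+ -->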
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to the FS at
+ the given dst name; the source is kept intact afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add them to the FS at
+ the given dst name, removing the sources afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name, removing the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add them to the FS at
+ the given dst name.
+ delSrc indicates whether the sources should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="moveToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ Remove the source afterwards]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
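+ <!-- A sketch of the startLocalOutput/completeLocalOutput pairing, assuming an
+ open FileSystem fs: write to the returned local file, then hand it back so a
+ remote FS can copy it into place (paths are hypothetical):
+
+ import java.io.FileOutputStream;
+ import org.apache.hadoop.fs.Path;
+
+ Path target = new Path("/results/summary.txt");
+ Path tmp = new Path("/tmp/summary.txt.tmp");
+ Path local = fs.startLocalOutput(target, tmp);
+
+ FileOutputStream out = new FileOutputStream(local.toString());
+ out.write("done\n".getBytes());
+ out.close();
+
+ fs.completeLocalOutput(target, tmp); // no-op locally; copies tmp into place remotely
+ -->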
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[No more filesystem operations are needed. Will
+ release any held locks.]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total size of all files in the filesystem.]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should optimally
+ be split into to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a file status object that represents the path.
+ @param f The path we want information from
+ @return a FileStatus object
+ @throws FileNotFoundException when the path does not exist;
+ IOException see specific implementation]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the checksum of a file.
+
+ @param f The file path
+ @return The file checksum. The default return value is null,
+ which indicates that no checksum algorithm is implemented
+ in the corresponding FileSystem.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permission of a path.
+ @param p
+ @param permission]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param p The path
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the modification and access times of a file.
+ @param p The path
+ @param mtime Set the modification time of this file.
+ The number of milliseconds since Jan 1, 1970.
+ A value of -1 means that this call should not set modification time.
+ @param atime Set the access time of this file.
+ The number of milliseconds since Jan 1, 1970.
+ A value of -1 means that this call should not set access time.]]>
+ </doc>
+ </method>
+ <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.FileSystem&gt;"/>
+ <doc>
+ <![CDATA[Get the statistics for a particular file system
+ @param cls the class to lookup
+ @return a statistics object]]>
+ </doc>
+ </method>
+ <method name="printStatistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
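+ <!-- A sketch of reading per-FileSystem-class statistics after some I/O; the
+ class literal is illustrative, any FileSystem subclass can be queried:
+
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.RawLocalFileSystem;
+
+ FileSystem.Statistics st = FileSystem.getStatistics(RawLocalFileSystem.class);
+ System.out.println("read=" + st.getBytesRead() + " written=" + st.getBytesWritten());
+ -->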
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="statistics" type="org.apache.hadoop.fs.FileSystem.Statistics"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The statistics for this file system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An abstract base class for a fairly generic filesystem. It
+ may be implemented as a distributed filesystem, or as a "local"
+ one that reflects the locally-connected disk. The local version
+ exists for small Hadoop instances and for testing.
+
+ <p>
+
+ All user code that may potentially use the Hadoop Distributed
+ File System should be written to use a FileSystem object. The
+ Hadoop DFS is a multi-machine system that appears as a single
+ disk. It's useful because of its fault tolerance and potentially
+ very large capacity.
+
+ <p>
+ The local implementation is {@link LocalFileSystem} and the distributed
+ implementation is DistributedFileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem -->
+ <!-- start class org.apache.hadoop.fs.FileSystem.Statistics -->
+ <class name="FileSystem.Statistics" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="incrementBytesRead"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes read in the statistics
+ @param newBytes the additional bytes read]]>
+ </doc>
+ </method>
+ <method name="incrementBytesWritten"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes written in the statistics
+ @param newBytes the additional bytes written]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes read
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes written
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem.Statistics -->
+ <!-- start class org.apache.hadoop.fs.FileUtil -->
+ <class name="FileUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus to an array of Path.
+
+ @param stats
+ an array of FileStatus objects
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus to an array of Path.
+ If stats is null, return the default path.
+ @param stats
+ an array of FileStatus objects
+ @param path
+ the default path to return if stats is null
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
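+ <!-- A sketch of stat2Paths, a common idiom for turning a listStatus result
+ back into plain paths, assuming an open FileSystem fs:
+
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileUtil;
+ import org.apache.hadoop.fs.Path;
+
+ Path dir = new Path("/data");
+ FileStatus[] stats = fs.listStatus(dir);
+ Path[] paths = FileUtil.stat2Paths(stats, dir); // falls back to dir if stats is null
+ -->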
+ <method name="fullyDelete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a directory and all its contents. If
+ this returns false, the directory may be only partially deleted.]]>
+ </doc>
+ </method>
+ <method name="fullyDelete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link FileSystem#delete(Path, boolean)}">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recursively delete a directory.
+
+ @param fs {@link FileSystem} on which the path is present
+ @param dir directory to recursively delete
+ @throws IOException
+ @deprecated Use {@link FileSystem#delete(Path, boolean)}]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copyMerge" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dstFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="addString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy all files in a directory to one output file (merge).]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy local files to a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="java.io.File"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy FileSystem files to local files.]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param filename The filename to convert
+ @return The Unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @return The Unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <param name="makeCanonicalPath" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @param makeCanonicalPath
+ Whether to make a canonical path for the file passed
+ @return The Unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="getDU" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Takes an input dir and returns the disk usage (du) of that local
+ directory. Very basic implementation.
+
+ @param dir
+ The input dir whose local disk space is to be computed
+ @return The total disk space of the input local directory]]>
+ </doc>
+ </method>
+ <method name="unZip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="unzipDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a zip File as input, unzip it into the unzip directory
+ passed as the second parameter.
+ @param inFile The zip file as input
+ @param unzipDir The unzip directory where to unzip the zip file.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unTar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="untarDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a tar File as input, untar it into the untar directory
+ passed as the second parameter.
+
+ This utility will untar ".tar" files as well as ".tar.gz" and ".tgz" files.
+
+ @param inFile The tar file as input.
+ @param untarDir The untar directory where to untar the tar file.
+ @throws IOException]]>
+ </doc>
+ </method>
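+ <!-- A sketch of the local archive helpers (file names are hypothetical):
+
+ import java.io.File;
+ import org.apache.hadoop.fs.FileUtil;
+
+ FileUtil.unZip(new File("job.zip"), new File("/tmp/job"));
+ FileUtil.unTar(new File("data.tar.gz"), new File("/tmp/data"));
+ -->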
+ <method name="symLink" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="linkname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a soft link between a source and a destination,
+ only on a local disk. HDFS does not support this.
+ @param target the target for symlink
+ @param linkname the symlink
+ @return value returned by the command]]>
+ </doc>
+ </method>
+ <method name="chmod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="perm" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Change the permissions on a filename.
+ @param filename the name of the file to change
+ @param perm the permission string
+ @return the exit code from the command
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="createLocalTempFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="basefile" type="java.io.File"/>
+ <param name="prefix" type="java.lang.String"/>
+ <param name="isDeleteOnExit" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a tmp file for a base file.
+ @param basefile the base file of the tmp
+ @param prefix file name prefix of tmp
+ @param isDeleteOnExit if true, the tmp will be deleted when the VM exits
+ @return a newly created tmp file
+ @exception IOException If a tmp file cannot be created
+ @see java.io.File#createTempFile(String, String, File)
+ @see java.io.File#deleteOnExit()]]>
+ </doc>
+ </method>
+ <method name="replaceFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="target" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move the src file to the name specified by target.
+ @param src the source file
+ @param target the target file
+ @exception IOException If this operation fails]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of file-processing utility methods.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil -->
+ <!-- start class org.apache.hadoop.fs.FileUtil.HardLink -->
+ <class name="FileUtil.HardLink" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil.HardLink"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createHardLink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.io.File"/>
+ <param name="linkName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a hard link.]]>
+ </doc>
+ </method>
+ <method name="getLinkCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Retrieves the number of links to the specified file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Class for creating hardlinks.
+ Supports Unix, Cygwin, and Windows XP.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil.HardLink -->
+ <!-- start class org.apache.hadoop.fs.FilterFileSystem -->
+ <class name="FilterFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FilterFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file; if recursive is true, a directory is deleted together with its contents]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List files in a directory.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param newDir]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should optimally
+ be split into to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get file status.]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A <code>FilterFileSystem</code> contains
+ some other file system, which it uses as
+ its basic file system, possibly transforming
+ the data along the way or providing additional
+ functionality. The class <code>FilterFileSystem</code>
+ itself simply overrides all methods of
+ <code>FileSystem</code> with versions that
+ pass all requests to the contained file
+ system. Subclasses of <code>FilterFileSystem</code>
+ may further override some of these methods
+ and may also provide additional methods
+ and fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FilterFileSystem -->
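+ <!-- Illustrative Java sketch: a minimal FilterFileSystem subclass that logs
+ each open() and delegates everything else to the wrapped filesystem.
+ LoggingFileSystem is a hypothetical name, not part of this release.
+
+ import java.io.IOException;
+ import org.apache.hadoop.fs.FSDataInputStream;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.FilterFileSystem;
+ import org.apache.hadoop.fs.Path;
+
+ public class LoggingFileSystem extends FilterFileSystem {
+   public LoggingFileSystem(FileSystem rawFs) { super(rawFs); }
+
+   @Override
+   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+     System.err.println("open: " + f); // observe the call, then delegate
+     return super.open(f, bufferSize);
+   }
+ }
+ -->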
+ <!-- start class org.apache.hadoop.fs.FSDataInputStream -->
+ <class name="FSDataInputStream" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSDataInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="desired" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
+ and buffers input through a {@link BufferedInputStream}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataInputStream -->
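+ <!-- Illustrative Java sketch, assuming an already-initialized FileSystem fs
+ and a method declared to throw IOException; the path is a hypothetical
+ placeholder. Shows the Seekable and PositionedReadable behavior side by side.
+
+ FSDataInputStream in = fs.open(new Path("/data/part-00000"));
+ byte[] buf = new byte[128];
+ in.readFully(0L, buf);  // positioned read: does not move the file pointer
+ in.seek(1024L);         // Seekable: reposition the stream
+ long pos = in.getPos(); // now 1024
+ in.close();
+ -->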
+ <!-- start class org.apache.hadoop.fs.FSDataOutputStream -->
+ <class name="FSDataOutputStream" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Syncable"/>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWrappedStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps an {@link OutputStream} in a {@link DataOutputStream},
+ buffers output through a {@link BufferedOutputStream} and creates a checksum
+ file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataOutputStream -->
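+ <!-- Illustrative Java sketch, assuming an already-initialized FileSystem fs
+ and a method declared to throw IOException; the path is a hypothetical
+ placeholder.
+
+ FSDataOutputStream out = fs.create(new Path("/tmp/out.dat"));
+ out.writeUTF("hello");       // DataOutputStream methods are inherited
+ long written = out.getPos(); // bytes written so far
+ out.sync();                  // Syncable: push buffered bytes to the filesystem
+ out.close();
+ -->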
+ <!-- start class org.apache.hadoop.fs.FSError -->
+ <class name="FSError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Thrown for unexpected filesystem errors, presumed to reflect disk errors
+ in the native filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSError -->
+ <!-- start class org.apache.hadoop.fs.FSInputChecker -->
+ <class name="FSInputChecker" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs]]>
+ </doc>
+ </constructor>
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int, boolean, java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs
+ @param sum the type of Checksum engine
+ @param chunkSize maximum chunk size
+ @param checksumSize the number of bytes per checksum]]>
+ </doc>
+ </constructor>
+ <method name="readChunk" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads in next checksum chunk data into <code>buf</code> at <code>offset</code>
+ and checksum into <code>checksum</code>.
+ The method is used for implementing read; therefore, it should be optimized
+ for sequential reading.
+ @param pos chunkPos
+ @param buf destination buffer
+ @param offset offset in buf at which to store data
+ @param len maximum number of bytes to read
+ @return number of bytes read]]>
+ </doc>
+ </method>
+ <method name="getChunkPosition" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <doc>
+ <![CDATA[Return position of beginning of chunk containing pos.
+
+ @param pos a position in the file
+ @return the starting position of the chunk which contains the byte]]>
+ </doc>
+ </method>
+ <method name="needChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if there is a need for checksum verification]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read one checksum-verified byte
+
+ @return the next byte of data, or <code>-1</code> if the end of the
+ stream is reached.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read checksum verified bytes from this byte-input stream into
+ the specified byte array, starting at the given offset.
+
+ <p> This method implements the general contract of the corresponding
+ <code>{@link InputStream#read(byte[], int, int) read}</code> method of
+ the <code>{@link InputStream}</code> class. As an additional
+ convenience, it attempts to read as many bytes as possible by repeatedly
+ invoking the <code>read</code> method of the underlying stream. This
+ iterated <code>read</code> continues until one of the following
+ conditions becomes true: <ul>
+
+ <li> The specified number of bytes have been read,
+
+ <li> The <code>read</code> method of the underlying stream returns
+ <code>-1</code>, indicating end-of-file.
+
+ </ul> If the first <code>read</code> on the underlying stream returns
+ <code>-1</code> to indicate end-of-file then this method returns
+ <code>-1</code>. Otherwise this method returns the number of bytes
+ actually read.
+
+ @param b destination buffer.
+ @param off offset at which to start storing bytes.
+ @param len maximum number of bytes to read.
+ @return the number of bytes read, or <code>-1</code> if the end of
+ the stream has been reached.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if any checksum error occurs]]>
+ </doc>
+ </method>
+ <method name="checksum2long" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="checksum" type="byte[]"/>
+ <doc>
+ <![CDATA[Convert a checksum byte array to a long]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over and discards <code>n</code> bytes of data from the
+ input stream.
+
+ <p>This method may skip more bytes than are remaining in the backing
+ file. This produces no exception and the number of bytes skipped
+ may include some number of bytes that were beyond the EOF of the
+ backing file. Attempting to read from the stream after skipping past
+ the end will result in -1 indicating the end of the file.
+
+ <p>If <code>n</code> is negative, no bytes are skipped.
+
+ @param n the number of bytes to be skipped.
+ @return the actual number of bytes skipped.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to skip to is corrupted]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given position in the stream.
+ The next read() will be from that position.
+
+ <p>This method may seek past the end of the file.
+ This produces no exception and an attempt to read from
+ the stream will result in -1 indicating the end of the file.
+
+ @param pos the position to seek to.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to seek to is corrupted]]>
+ </doc>
+ </method>
+ <method name="readFully" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="stm" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A utility function that tries to read up to <code>len</code> bytes from
+ <code>stm</code>.
+
+ @param stm an input stream
+ @param buf destination buffer
+ @param offset offset at which to store data
+ @param len number of bytes to read
+ @return actual number of bytes read
+ @throws IOException if there is any IO error]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="maxChunkSize" type="int"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Set the checksum-related parameters
+ @param sum which type of checksum to use
+ @param maxChunkSize maximum chunk size
+ @param checksumSize checksum size]]>
+ </doc>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="readlimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="file" type="org.apache.hadoop.fs.Path"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file name from which data is read]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This is a generic input stream for verifying checksums for
+ data before it is read by a user.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputChecker -->
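+ <!-- Illustrative Java note: FSInputChecker is abstract, so applications meet
+ it indirectly through checksummed filesystems such as LocalFileSystem. A
+ minimal sketch, assuming a Configuration conf and a hypothetical local file
+ with a side .crc checksum file:
+
+ LocalFileSystem lfs = FileSystem.getLocal(conf); // a ChecksumFileSystem
+ FSDataInputStream in = lfs.open(new Path("/tmp/data.txt"));
+ int b = in.read(); // each read is verified against the checksum file
+ in.close();
+ -->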
+ <!-- start class org.apache.hadoop.fs.FSInputStream -->
+ <class name="FSInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSInputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="seek"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks a different copy of the data. Returns true if
+ a new source is found, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[FSInputStream is a generic old InputStream with a little bit
+ of RAF-style seek ability.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputStream -->
+ <!-- start class org.apache.hadoop.fs.FSOutputSummer -->
+ <class name="FSOutputSummer" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSOutputSummer" type="java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="writeChunk"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write one byte]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes <code>len</code> bytes from the specified byte array
+ starting at offset <code>off</code> and generates a checksum for
+ each data chunk.
+
+ <p> This method stores bytes from the given array into this
+ stream's buffer before it gets checksummed. The buffer gets checksummed
+ and flushed to the underlying output stream when all the data
+ in a checksum chunk is in the buffer. If the buffer is empty and the
+ requested length is at least as large as the next checksum chunk
+ size, this method will checksum and write the chunk directly
+ to the underlying output stream, thus avoiding an unnecessary data copy.
+
+ @param b the data.
+ @param off the start offset in the data.
+ @param len the number of bytes to write.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="convertToByteStream" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Converts a checksum integer value to a byte stream]]>
+ </doc>
+ </method>
+ <method name="resetChecksumChunk"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Resets existing buffer with a new one of the specified size.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is a generic output stream for generating checksums for
+ data before it is written to the underlying stream]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSOutputSummer -->
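+ <!-- Illustrative Java sketch: FSOutputSummer is abstract and only asks a
+ subclass to provide writeChunk(). DiscardingSummer is a hypothetical example
+ that drops its output, shown purely to illustrate the contract.
+
+ import java.io.IOException;
+ import java.util.zip.CRC32;
+ import org.apache.hadoop.fs.FSOutputSummer;
+
+ class DiscardingSummer extends FSOutputSummer {
+   DiscardingSummer() {
+     super(new CRC32(), 512, 4); // CRC32 sums, 512-byte chunks, 4-byte checksums
+   }
+   @Override
+   protected void writeChunk(byte[] b, int off, int len, byte[] checksum)
+       throws IOException {
+     // a real subclass would emit the data bytes and the checksum here
+   }
+ }
+ -->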
+ <!-- start class org.apache.hadoop.fs.FsShell -->
+ <class name="FsShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="FsShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current trash directory used by this shell.]]>
+ </doc>
+ </method>
+ <method name="byteDesc" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="long"/>
+ <doc>
+ <![CDATA[Return an abbreviated English-language description of the byte length]]>
+ </doc>
+ </method>
+ <method name="limitDecimalTo2" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dateForm" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="modifFmt" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provide command line access to a FileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsShell -->
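+ <!-- Illustrative Java sketch: FsShell implements Tool, so it is normally
+ driven through ToolRunner from a main method; the "-ls /" arguments below
+ are just an example invocation.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FsShell;
+ import org.apache.hadoop.util.ToolRunner;
+
+ int rc = ToolRunner.run(new FsShell(new Configuration()),
+                         new String[] { "-ls", "/" });
+ System.exit(rc);
+ -->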
+ <!-- start class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
+ <class name="FsUrlStreamHandlerFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.net.URLStreamHandlerFactory"/>
+ <constructor name="FsUrlStreamHandlerFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsUrlStreamHandlerFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createURLStreamHandler" return="java.net.URLStreamHandler"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Factory for URL stream handlers.
+
+ There is only one handler whose job is to create UrlConnections. A
+ FsUrlConnection relies on FileSystem to choose the appropriate FS
+ implementation.
+
+ Before returning our handler, we make sure that FileSystem knows an
+ implementation for the requested scheme/protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
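+ <!-- Illustrative Java sketch: registering the factory so that java.net.URL
+ can resolve filesystem schemes. java.net.URL accepts a factory only once per
+ JVM; the namenode host below is a hypothetical placeholder.
+
+ import java.io.InputStream;
+ import java.net.URL;
+ import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
+
+ URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
+ InputStream in = new URL("hdfs://namenode:8020/user/data.txt").openStream();
+ in.close();
+ -->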
+ <!-- start class org.apache.hadoop.fs.HarFileSystem -->
+ <class name="HarFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HarFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Public constructor for a HarFileSystem]]>
+ </doc>
+ </constructor>
+ <constructor name="HarFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor to create a HarFileSystem with an
+ underlying filesystem.
+ @param fs]]>
+ </doc>
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize a Har filesystem per har archive. The
+ archive home directory is the top level directory
+ in the filesystem that contains the HAR archive.
+ Be careful with this method: you do not want to go
+ on creating new Filesystem instances per call to
+ path.getFileSystem().
+ The uri of a Har is
+ har://underlyingfsscheme-host:port/archivepath
+ or
+ har:///archivepath. The latter form assumes the default underlying
+ filesystem when one is not specified.]]>
+ </doc>
+ </method>
+ <method name="getHarVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the top level archive.]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the uri of this filesystem.
+ The uri is of the form
+ har://underlyingfsscheme-host:port/pathintheunderlyingfs]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[get block locations from the underlying fs
+ @param file the input filestatus to get block locations
+ @param start the start in the file
+ @param len the length in the file
+ @return block locations for this segment of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getHarHash" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[the hash of the path p inside
+ the filesystem
+ @param p the path in the harfilesystem
+ @return the hash code of the path.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[return the filestatus of files in a har archive.
+ The permissions returned are those of the archive
+ index files; the permissions are not persisted
+ while creating a hadoop archive.
+ @param f the path in har filesystem
+ @return filestatus.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a har input stream which fakes end of
+ file. It reads the index files to get the part
+ file name and the size and start of the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[liststatus returns the children of a directory
+ after looking up the index files.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the top level archive path.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[copies the file in the har filesystem to a local file.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permisssion" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <field name="VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is an implementation of the Hadoop Archive
+ Filesystem. This archive Filesystem has index files
+ of the form _index* and has contents of the form
+ part-*. The index files store the indexes of the
+ real files. The index files are of the form _masterindex
+ and _index. The master index is a level of indirection
+ into the index file to make the lookups faster. The index
+ file is sorted by the hash codes of the paths that it contains,
+ and the master index contains pointers to the positions in
+ the index for ranges of hash codes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.HarFileSystem -->
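+ <!-- Illustrative Java sketch: reading a file out of an archive through a
+ har:// URI, assuming a Configuration conf that maps the har scheme to
+ HarFileSystem; the host and archive path are hypothetical placeholders.
+
+ Path p = new Path("har://hdfs-namenode:8020/user/archives/logs.har/part-0");
+ FileSystem harFs = p.getFileSystem(conf); // resolves to a HarFileSystem
+ FSDataInputStream in = harFs.open(p);
+ in.close();
+ -->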
+ <!-- start class org.apache.hadoop.fs.InMemoryFileSystem -->
+ <class name="InMemoryFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InMemoryFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InMemoryFileSystem" type="java.net.URI, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reserveSpaceWithCheckSum" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Register a file with its size. This will also register a checksum for the
+ file that the user is trying to create. This is required since none of
+ the FileSystem APIs accept the size of the file as an argument. But since we
+ must know the size of the file a priori before we
+ create it, the user must call this method for each file they want to create
+ and reserve memory for that file. We either succeed in reserving memory
+ for both the main file and the checksum file and return true, or return
+ false.]]>
+ </doc>
+ </method>
+ <method name="getFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getNumFiles" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getFSSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPercentUsed" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of the in-memory filesystem. This implementation assumes
+ that the file lengths are known ahead of time and that the total length of all
+ the files is below a certain number (like 100 MB, configurable). Use the API
+ reserveSpaceWithCheckSum(Path f, int size) (described below) to reserve
+ space in the FS. The uri of this filesystem starts with
+ ramfs:// .]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.InMemoryFileSystem -->
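+ <!-- Illustrative Java sketch: space must be reserved before a file is
+ created; the ramfs URI and path are hypothetical placeholders, and conf is
+ an existing Configuration.
+
+ import java.net.URI;
+
+ InMemoryFileSystem ramFs =
+     new InMemoryFileSystem(URI.create("ramfs://ram"), conf);
+ Path f = new Path("/scratch/block0");
+ if (ramFs.reserveSpaceWithCheckSum(f, 4096L)) { // data + checksum reserved
+   FSDataOutputStream out = ramFs.create(f);
+   out.close();
+ }
+ -->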
+ <!-- start class org.apache.hadoop.fs.LocalDirAllocator -->
+ <class name="LocalDirAllocator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalDirAllocator" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an allocator object
+ @param contextCfgItemName]]>
+ </doc>
+ </constructor>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. This method should be used if the size of
+ the file is not known a priori. We go round-robin over the set of disks
+ (via the configured dirs) and return the first complete path where
+ we could create the parent directory of the passed path.
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. Pass size as -1 if not known a priori. We
+ round-robin over the set of disks (via the configured dirs) and return
+ the first complete path which has enough space.
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathToRead" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS for reading. We search through all the
+ configured dirs for the file's existence and return the complete
+ path to the file when we find one
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createTmpFileForWrite" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a temporary file in the local FS. Pass size as -1 if not known
+ a priori. We round-robin over the set of disks (via the configured dirs)
+ and select the first complete path which has enough space. A file is
+ created on this directory. The file is guaranteed to go away when the
+ JVM exits.
+ @param pathStr prefix for the temporary file
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return a unique temporary file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isContextValid" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextCfgItemName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Check whether a context is valid.
+ @param contextCfgItemName the configuration item that names the context
+ @return true if the context is valid, false otherwise]]>
+ </doc>
+ </method>
+ <method name="ifExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[We search through all the configured dirs for the file's existence
+ and return true when we find it.
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return true if the file exists, false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of a round-robin scheme for disk allocation when creating
+ files. It keeps track of which disk was last allocated for a file write.
+ For the current request, the next disk in the set is allocated if its free
+ space is sufficient to accommodate the file that is being considered for
+ creation. If the space requirements cannot be met, the next disk in order
+ is tried, and so on, until a disk with sufficient capacity is found.
+ Once a disk with sufficient space is identified, a check is done to make
+ sure that the disk is writable. Also, there is an API provided that doesn't
+ take the space requirements into consideration but just checks whether the
+ disk under consideration is writable (this should be used for cases where
+ the file size is not known a priori). An API is provided to read a path that
+ was created earlier. That API works by scanning all the disks for the
+ input pathname.
+ This implementation also provides the functionality of having multiple
+ allocators per JVM (one for each unique functionality or context, like
+ mapred, dfs-client, etc.). It ensures that there is only one instance of
+ an allocator per context per JVM.
+ Note:
+ 1. The contexts referred to above are actually the configuration items defined
+ in the Configuration class, like "mapred.local.dir" (for which we want to
+ control the dir allocations). The context strings are exactly those
+ configuration items.
+ 2. This implementation does not take into consideration cases where
+ a disk becomes read-only or runs out of space while a file is being written
+ to (disks are shared between multiple processes, and so the latter situation
+ is probable).
+ 3. In the class implementation, "Disk" is referred to as "Dir", which
+ actually points to the configured directory on the Disk that will be the
+ parent for all file write/read allocations.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalDirAllocator -->
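+ <!-- Usage sketch for LocalDirAllocator (illustrative; assumes a Configuration
+ object named "conf" is in scope, and the path names are hypothetical):
+ LocalDirAllocator alloc = new LocalDirAllocator("mapred.local.dir");
+ // size known up front: picks the first dir with enough free space
+ Path out = alloc.getLocalPathForWrite("output/part-0", 4096L, conf);
+ // size not known a priori: only checks that the chosen dir is writable
+ Path log = alloc.getLocalPathForWrite("output/log", conf);
+ // temporary file that is deleted when the JVM exits
+ java.io.File tmp = alloc.createTmpFileForWrite("output/tmp", 1024L, conf);
+ -->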
+ <!-- start class org.apache.hadoop.fs.LocalFileSystem -->
+ <class name="LocalFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocalFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRaw" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Moves files to a bad file directory on the same device, so that their
+ storage will not be reused.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the checksummed local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalFileSystem -->
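+ <!-- Usage sketch for LocalFileSystem (illustrative; assumes a Configuration
+ "conf" in scope; FileSystem.getLocal is the usual factory for this class):
+ LocalFileSystem localFs = FileSystem.getLocal(conf); // checksummed local FS
+ FileSystem rawFs = localFs.getRaw();                 // skips CRC bookkeeping
+ java.io.File f = localFs.pathToFile(new Path("/tmp/example.txt"));
+ -->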
+ <!-- start class org.apache.hadoop.fs.MD5MD5CRC32FileChecksum -->
+ <class name="MD5MD5CRC32FileChecksum" extends="org.apache.hadoop.fs.FileChecksum"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5MD5CRC32FileChecksum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Same as this(0, 0, null)]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5MD5CRC32FileChecksum" type="int, long, org.apache.hadoop.io.MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an MD5MD5CRC32FileChecksum.]]>
+ </doc>
+ </constructor>
+ <method name="getAlgorithmName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="xml" type="org.znerd.xmlenc.XMLOutputter"/>
+ <param name="that" type="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write that object to xml output.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attrs" type="org.xml.sax.Attributes"/>
+ <exception name="SAXException" type="org.xml.sax.SAXException"/>
+ <doc>
+ <![CDATA[Return the object represented in the attributes.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[MD5 of MD5 of CRC32.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.MD5MD5CRC32FileChecksum -->
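+ <!-- Usage sketch for MD5MD5CRC32FileChecksum (illustrative; assumes a
+ FileSystem "fs" whose getFileChecksum(Path) returns this checksum type,
+ as HDFS does):
+ FileChecksum sum = fs.getFileChecksum(new Path("/data/file"));
+ if (sum != null) {
+   System.out.println(sum.getAlgorithmName() + ": " + sum);
+ }
+ -->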
+ <!-- start class org.apache.hadoop.fs.Path -->
+ <class name="Path" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="Path" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a path from a String. Path strings are URIs, but with
+ unescaped elements and some additional normalization.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Path from components.]]>
+ </doc>
+ </constructor>
+ <method name="toUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this to a URI.]]>
+ </doc>
+ </method>
+ <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the FileSystem that owns this Path.]]>
+ </doc>
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True if the directory of this path is absolute.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the final component of this path.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the parent of a path or null if at root.]]>
+ </doc>
+ </method>
+ <method name="suffix" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a suffix to the final name in the path.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="depth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of elements in this path.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <doc>
+ <![CDATA[Returns a qualified path object.]]>
+ </doc>
+ </method>
+ <field name="SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The directory separator, a slash.]]>
+ </doc>
+ </field>
+ <field name="SEPARATOR_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CUR_DIR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Names a file or directory in a {@link FileSystem}.
+ Path strings use slash as the directory separator. A path string is
+ absolute if it begins with a slash.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Path -->
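+ <!-- Usage sketch for Path (illustrative; the URI and file names are
+ hypothetical, and "fs" is an assumed FileSystem for the last call):
+ Path parent = new Path("hdfs://namenode:9000/user/alice");
+ Path child = new Path(parent, "data.txt"); // resolve child against parent
+ child.getName();   // "data.txt"
+ child.getParent(); // the /user/alice path
+ child.depth();     // number of path elements
+ Path qualified = new Path("data.txt").makeQualified(fs);
+ -->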
+ <!-- start interface org.apache.hadoop.fs.PathFilter -->
+ <interface name="PathFilter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Tests whether or not the specified abstract pathname should be
+ included in a pathname list.
+
+ @param path The abstract pathname to be tested
+ @return <code>true</code> if and only if <code>path</code>
+ should be included]]>
+ </doc>
+ </method>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PathFilter -->
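+ <!-- Usage sketch for PathFilter (illustrative; assumes a FileSystem "fs" in
+ scope; listStatus(Path, PathFilter) applies the filter to each entry):
+ PathFilter noHidden = new PathFilter() {
+   public boolean accept(Path path) {
+     String name = path.getName();
+     return !name.startsWith("_") && !name.startsWith(".");
+   }
+ };
+ FileStatus[] visible = fs.listStatus(new Path("/data"), noHidden);
+ -->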
+ <!-- start interface org.apache.hadoop.fs.PositionedReadable -->
+ <interface name="PositionedReadable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read up to the specified number of bytes, from a given
+ position within a file, and return the number of bytes read. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the specified number of bytes, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a number of bytes equal to the length of the buffer, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits positional reading.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PositionedReadable -->
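+ <!-- Usage sketch for PositionedReadable (illustrative; FSDataInputStream,
+ returned by FileSystem.open, implements this interface):
+ FSDataInputStream in = fs.open(new Path("/data/file"));
+ byte[] buf = new byte[128];
+ int n = in.read(1024L, buf, 0, buf.length); // positional read; stream offset unchanged
+ in.readFully(0L, buf); // reads exactly buf.length bytes or throws IOException
+ in.close();
+ -->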
+ <!-- start class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <class name="RawLocalFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RawLocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the specified directory hierarchy. Does not
+ treat existence as an error.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsWorkingFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chown to set owner.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chmod to set permission.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the raw local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.RawLocalFileSystem -->
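+ <!-- Usage sketch for RawLocalFileSystem (illustrative; assumes "conf" is a
+ Configuration; "file:///" is the conventional local scheme):
+ RawLocalFileSystem raw = new RawLocalFileSystem();
+ raw.initialize(java.net.URI.create("file:///"), conf);
+ FileStatus st = raw.getFileStatus(new Path("/tmp"));
+ raw.setPermission(new Path("/tmp/example.txt"), new FsPermission((short) 0644)); // runs chmod
+ -->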
+ <!-- start interface org.apache.hadoop.fs.Seekable -->
+ <interface name="Seekable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks a different copy of the data. Returns true if
+ a new source is found, false otherwise.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits seeking.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Seekable -->
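+ <!-- Usage sketch for Seekable (illustrative; FSDataInputStream implements it):
+ FSDataInputStream in = fs.open(new Path("/data/file"));
+ in.seek(100L);          // the next read() starts at byte 100
+ long pos = in.getPos(); // 100
+ boolean moved = in.seekToNewSource(pos); // try a different copy of the data
+ -->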
+ <!-- start interface org.apache.hadoop.fs.Syncable -->
+ <interface name="Syncable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Synchronize all buffers with the underlying devices.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface declares the sync() operation.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Syncable -->
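+ <!-- Usage sketch for Syncable (illustrative; assumes FSDataOutputStream
+ implements it in this release, and a byte[] "bytes" is in scope):
+ FSDataOutputStream outStream = fs.create(new Path("/data/out"));
+ outStream.write(bytes);
+ outStream.sync();  // push buffered data to the underlying devices
+ outStream.close();
+ -->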
+ <!-- start class org.apache.hadoop.fs.Trash -->
+ <class name="Trash" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Trash" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a trash can accessor.
+ @param conf a Configuration]]>
+ </doc>
+ </constructor>
+ <constructor name="Trash" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a trash can accessor for the FileSystem provided.]]>
+ </doc>
+ </constructor>
+ <method name="moveToTrash" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move a file or directory to the current trash directory.
+ @return false if the item is already in the trash or trash is disabled]]>
+ </doc>
+ </method>
+ <method name="checkpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a trash checkpoint.]]>
+ </doc>
+ </method>
+ <method name="expunge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete old checkpoints.]]>
+ </doc>
+ </method>
+ <method name="getEmptier" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a {@link Runnable} that periodically empties the trash of all
+ users, intended to be run by the superuser. Only one checkpoint is kept
+ at a time.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Run an emptier.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides a <i>trash</i> feature. Files are moved to a user's trash
+ directory, a subdirectory of their home directory named ".Trash". Files are
+ initially moved to a <i>current</i> sub-directory of the trash directory.
+ Within that sub-directory their original path is preserved. Periodically
+ one may checkpoint the current trash and remove older checkpoints. (This
+ design permits trash management without enumeration of the full trash
+ content, without date support in the filesystem, and without clock
+ synchronization.)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Trash -->
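+ <!-- Usage sketch for Trash (illustrative; assumes a Configuration "conf" with
+ trash enabled, and a hypothetical file path):
+ Trash trash = new Trash(conf);
+ boolean moved = trash.moveToTrash(new Path("/user/alice/old.log"));
+ trash.checkpoint(); // roll the current trash into a checkpoint
+ trash.expunge();    // delete old checkpoints
+ -->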
+</package>
+<package name="org.apache.hadoop.fs.ftp">
+ <!-- start class org.apache.hadoop.fs.ftp.FTPException -->
+ <class name="FTPException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.String, java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A class to wrap a {@link Throwable} into a RuntimeException.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPException -->
+ <!-- start class org.apache.hadoop.fs.ftp.FTPFileSystem -->
+ <class name="FTPFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A stream obtained via this call must be closed before using other APIs of
+ this class or else the invocation will block.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} backed by an FTP client provided by <a
+ href="http://commons.apache.org/net/">Apache Commons Net</a>.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPFileSystem -->
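+ <!-- Usage sketch for FTPFileSystem (illustrative; host and credentials are
+ hypothetical, and it assumes the "ftp" scheme is mapped to this class in
+ the configuration):
+ Path remote = new Path("ftp://user:password@ftphost/home/data.txt");
+ FileSystem ftp = remote.getFileSystem(conf);
+ FSDataInputStream in = ftp.open(remote, FTPFileSystem.DEFAULT_BUFFER_SIZE);
+ in.close(); // close before calling other methods, or they will block
+ -->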
+ <!-- start class org.apache.hadoop.fs.ftp.FTPInputStream -->
+ <class name="FTPInputStream" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPInputStream" type="java.io.InputStream, org.apache.commons.net.ftp.FTPClient, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readLimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPInputStream -->
+</package>
+<package name="org.apache.hadoop.fs.kfs">
+ <!-- start class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+ <class name="KosmosFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="KosmosFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return null if the file doesn't exist; otherwise, get the
+ locations of the various chunks of the file from KFS.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A FileSystem backed by KFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+</package>
+<package name="org.apache.hadoop.fs.permission">
+ <!-- start class org.apache.hadoop.fs.permission.AccessControlException -->
+ <class name="AccessControlException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AccessControlException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor is needed for unwrapping from
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.AccessControlException -->
+ <!-- start class org.apache.hadoop.fs.permission.FsAction -->
+ <class name="FsAction" extends="java.lang.Enum&lt;org.apache.hadoop.fs.permission.FsAction&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.fs.permission.FsAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="implies" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[Return true if this action implies that action.
+ @param that the action to check against]]>
+ </doc>
+ </method>
+ <method name="and" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[AND operation.]]>
+ </doc>
+ </method>
+ <method name="or" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[OR operation.]]>
+ </doc>
+ </method>
+ <method name="not" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[NOT operation.]]>
+ </doc>
+ </method>
+ <field name="INDEX" type="int"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Octal representation]]>
+ </doc>
+ </field>
+ <field name="SYMBOL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Symbolic representation]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[File system actions, e.g. read, write, etc.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsAction -->
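+  <!-- Usage sketch (illustrative only, not produced by the doclet): combining
+       and testing FsAction values with the implies/and/or/not helpers above.
+       The constants READ and WRITE are assumed members of the enum; this file
+       lists only the generated values()/valueOf() accessors.
+
+         import org.apache.hadoop.fs.permission.FsAction;
+
+         public class FsActionDemo {
+           public static void main(String[] args) {
+             FsAction rw = FsAction.READ.or(FsAction.WRITE); // union of actions
+             System.out.println(rw.implies(FsAction.READ));  // true
+             System.out.println(rw.and(FsAction.WRITE));     // WRITE
+             System.out.println(rw.not());                   // the complement
+           }
+         }
+  -->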
+ <!-- start class org.apache.hadoop.fs.permission.FsPermission -->
+ <class name="FsPermission" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Construct from the given {@link FsAction} values.
+ @param u user action
+ @param g group action
+ @param o other action]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="short"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Construct from the given mode.
+      @param mode the permission mode, in the encoding produced by {@link #toShort()}
+ @see #toShort()]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor
+
+ @param other other permission]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="permission" type="short"/>
+ <doc>
+ <![CDATA[Create an immutable {@link FsPermission} object.]]>
+ </doc>
+ </method>
+ <method name="getUserAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getGroupAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getOtherAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return other {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="fromShort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="short"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link FsPermission} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="toShort" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Encode the object to a short.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply a umask to this permission and return a new one]]>
+ </doc>
+ </method>
+ <method name="getUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="setUMask"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Set the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="getDefault" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default permission.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unixSymbolicPermission" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create a FsPermission from a Unix symbolic permission string
+ @param unixSymbolicPermission e.g. "-rw-rw-rw-"]]>
+ </doc>
+ </method>
+ <field name="UMASK_LABEL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[umask property label]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_UMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A class for file/directory permissions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsPermission -->
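+  <!-- Usage sketch (illustrative only): building an FsPermission from actions,
+       round-tripping it through its short encoding, and applying a umask.
+       FsAction.READ_WRITE is assumed to be an enum constant; the octal value
+       follows the INDEX encoding documented for FsAction.
+
+         import org.apache.hadoop.fs.permission.FsAction;
+         import org.apache.hadoop.fs.permission.FsPermission;
+
+         public class FsPermissionDemo {
+           public static void main(String[] args) {
+             FsPermission p = new FsPermission(
+                 FsAction.READ_WRITE, FsAction.READ, FsAction.READ); // 0644
+             short mode = p.toShort();                // encode to a short
+             FsPermission q = new FsPermission(mode); // decode it back
+             System.out.println(p.equals(q));         // true
+
+             // Clear the bits named by a 022 umask, then parse a symbolic form.
+             FsPermission masked = p.applyUMask(new FsPermission((short) 022));
+             FsPermission rw = FsPermission.valueOf("-rw-rw-rw-");
+           }
+         }
+  -->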
+ <!-- start class org.apache.hadoop.fs.permission.PermissionStatus -->
+ <class name="PermissionStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="PermissionStatus" type="java.lang.String, java.lang.String, org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <param name="group" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Create an immutable {@link PermissionStatus} object.]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user name]]>
+ </doc>
+ </method>
+ <method name="getGroupName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group name]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return permission]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply umask.
+ @see FsPermission#applyUMask(FsPermission)]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link PermissionStatus} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a {@link PermissionStatus} from its base components.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store permission related information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.PermissionStatus -->
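+  <!-- Usage sketch (illustrative only): PermissionStatus is a Writable, so it
+       round-trips through DataOutput/DataInput; the static read(DataInput)
+       documented above rebuilds an equivalent instance.
+
+         import java.io.*;
+         import org.apache.hadoop.fs.permission.FsPermission;
+         import org.apache.hadoop.fs.permission.PermissionStatus;
+
+         public class PermissionStatusDemo {
+           public static void main(String[] args) throws IOException {
+             PermissionStatus ps = new PermissionStatus(
+                 "alice", "staff", new FsPermission((short) 0644));
+             ByteArrayOutputStream buf = new ByteArrayOutputStream();
+             ps.write(new DataOutputStream(buf));     // serialize
+             PermissionStatus copy = PermissionStatus.read(
+                 new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
+             System.out.println(copy.getUserName()
+                 + ":" + copy.getGroupName() + " " + copy.getPermission());
+           }
+         }
+  -->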
+</package>
+<package name="org.apache.hadoop.fs.s3">
+ <!-- start class org.apache.hadoop.fs.s3.Block -->
+ <class name="Block" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Block" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Holds metadata about a block of data being stored in a {@link FileSystemStore}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.Block -->
+ <!-- start interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <interface name="FileSystemStore" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inode" type="org.apache.hadoop.fs.s3.INode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="inodeExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveINode" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveBlock" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="byteRangeStart" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listDeepSubPaths" return="java.util.Set&lt;org.apache.hadoop.fs.Path&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="purge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete everything. Used for testing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="dump"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Diagnostic method to dump all INodes to the console.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for storing and retrieving {@link INode}s and {@link Block}s.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.s3.FileSystemStore -->
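+  <!-- Usage sketch (illustrative only): a small helper written against the
+       FileSystemStore contract above. Any implementation can be passed in;
+       the concrete store class shipped with Hadoop is not part of this file.
+
+         import java.io.IOException;
+         import org.apache.hadoop.fs.Path;
+         import org.apache.hadoop.fs.s3.FileSystemStore;
+         import org.apache.hadoop.fs.s3.INode;
+
+         public class StoreLookup {
+           // Fetch the INode for a path, or null when no metadata exists.
+           static INode lookup(FileSystemStore store, Path path)
+               throws IOException {
+             return store.inodeExists(path) ? store.retrieveINode(path) : null;
+           }
+         }
+  -->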
+ <!-- start class org.apache.hadoop.fs.s3.INode -->
+ <class name="INode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="INode" type="org.apache.hadoop.fs.s3.INode.FileType, org.apache.hadoop.fs.s3.Block[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.fs.s3.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileType" return="org.apache.hadoop.fs.s3.INode.FileType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSerializedLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="serialize" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deserialize" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="FILE_TYPES" type="org.apache.hadoop.fs.s3.INode.FileType[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DIRECTORY_INODE" type="org.apache.hadoop.fs.s3.INode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Holds file metadata including type (regular file, or directory),
+ and the list of blocks that are pointers to the data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.INode -->
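+  <!-- Usage sketch (illustrative only): serializing an INode and reading it
+       back. INode.FileType.FILE is assumed to be one of the FILE_TYPES
+       constants; the enum values themselves are not enumerated in this file.
+
+         import java.io.*;
+         import org.apache.hadoop.fs.s3.Block;
+         import org.apache.hadoop.fs.s3.INode;
+
+         public class INodeDemo {
+           public static void main(String[] args) throws IOException {
+             INode file = new INode(INode.FileType.FILE,
+                 new Block[] { new Block(1L, 4096L) }); // one 4 KB block
+             InputStream in = file.serialize();         // wire form
+             INode copy = INode.deserialize(in);        // and back
+             System.out.println(copy.isFile());         // true
+           }
+         }
+  -->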
+ <!-- start class org.apache.hadoop.fs.s3.MigrationTool -->
+ <class name="MigrationTool" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="MigrationTool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ This class is a tool for migrating data from an older to a newer version
+ of an S3 filesystem.
+ </p>
+ <p>
+ All files in the filesystem are migrated by re-writing the block metadata
+ - no datafiles are touched.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.MigrationTool -->
+ <!-- start class org.apache.hadoop.fs.s3.S3Credentials -->
+ <class name="S3Credentials" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Credentials"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@throws IllegalArgumentException if credentials for S3 cannot be
+ determined.]]>
+ </doc>
+ </method>
+ <method name="getAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSecretAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Extracts AWS credentials from the filesystem URI or configuration.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Credentials -->
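+  <!-- Usage sketch (illustrative only): initialize() pulls the AWS key pair
+       from the URI user info or from configuration. The property names below
+       are the conventional Hadoop keys for the s3 scheme and are an
+       assumption here; they are not listed in this file.
+
+         import java.net.URI;
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.fs.s3.S3Credentials;
+
+         public class CredentialsDemo {
+           public static void main(String[] args) throws Exception {
+             Configuration conf = new Configuration();
+             conf.set("fs.s3.awsAccessKeyId", "AKID");      // assumed key name
+             conf.set("fs.s3.awsSecretAccessKey", "SECRET"); // assumed key name
+             S3Credentials creds = new S3Credentials();
+             creds.initialize(new URI("s3://bucket"), conf);
+             System.out.println(creds.getAccessKey());       // AKID
+           }
+         }
+  -->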
+ <!-- start class org.apache.hadoop.fs.s3.S3Exception -->
+ <class name="S3Exception" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Exception" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown if there is a problem communicating with Amazon S3.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Exception -->
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystem -->
+ <class name="S3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="S3FileSystem" type="org.apache.hadoop.fs.s3.FileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[FileStatus for S3 file systems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A block-based {@link FileSystem} backed by
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ </p>
+ @see NativeS3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystem -->
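+  <!-- Usage sketch (illustrative only): creating and inspecting a file through
+       the block-based store, using the methods documented above (the
+       permission argument is currently ignored, per the create() note).
+       FsPermission.getDefault() is documented earlier in this file;
+       FileStatus.getLen() is assumed from the FileStatus class.
+
+         import java.net.URI;
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.fs.FSDataOutputStream;
+         import org.apache.hadoop.fs.Path;
+         import org.apache.hadoop.fs.permission.FsPermission;
+         import org.apache.hadoop.fs.s3.S3FileSystem;
+
+         public class S3FsDemo {
+           public static void main(String[] args) throws Exception {
+             Configuration conf = new Configuration(); // credentials configured
+             S3FileSystem fs = new S3FileSystem();
+             fs.initialize(new URI("s3://bucket"), conf);
+
+             Path p = new Path("/demo/hello.txt");
+             FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
+                 true, 4096, (short) 1, 64L * 1024 * 1024, null);
+             out.writeUTF("hello");
+             out.close();
+             System.out.println(fs.getFileStatus(p).getLen());
+           }
+         }
+  -->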
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <class name="S3FileSystemException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystemException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when there is a fatal exception while using {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <!-- start class org.apache.hadoop.fs.s3.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="org.apache.hadoop.fs.s3.S3FileSystemException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when Hadoop cannot read the version of the data stored
+ in {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.VersionMismatchException -->
+</package>
+<package name="org.apache.hadoop.fs.s3native">
+ <!-- start class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
+ <class name="NativeS3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeS3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NativeS3FileSystem" type="org.apache.hadoop.fs.s3native.NativeFileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ If <code>f</code> is a file, this method will make a single call to S3.
+ If <code>f</code> is a directory, this method will make a maximum of
+ (<i>n</i> / 1000) + 2 calls to S3, where <i>n</i> is the total number of
+ files and directories contained directly in <code>f</code>.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} for reading and writing files stored on
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
+ stores files on S3 in their
+ native form so they can be read by other S3 tools.
+ </p>
+ @see org.apache.hadoop.fs.s3.S3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
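+  <!-- Usage sketch (illustrative only): listing a directory on the native
+       store. Per the listStatus() note above, a directory with 2500 direct
+       children costs at most (2500 / 1000) + 2 = 4 calls to S3. The s3n://
+       scheme is the conventional one for this filesystem.
+
+         import java.net.URI;
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.fs.FileStatus;
+         import org.apache.hadoop.fs.Path;
+         import org.apache.hadoop.fs.s3native.NativeS3FileSystem;
+
+         public class NativeLsDemo {
+           public static void main(String[] args) throws Exception {
+             NativeS3FileSystem fs = new NativeS3FileSystem();
+             fs.initialize(new URI("s3n://bucket"), new Configuration());
+             for (FileStatus st : fs.listStatus(new Path("/logs"))) {
+               System.out.println(st.getPath());
+             }
+           }
+         }
+  -->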
+</package>
+<package name="org.apache.hadoop.fs.shell">
+ <!-- start class org.apache.hadoop.fs.shell.Command -->
+ <class name="Command" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Command" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return the command's name, excluding the leading "-" character]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the command on the input path
+
+ @param path the input path
+ @throws IOException if any error occurs]]>
+ </doc>
+ </method>
+ <method name="runAll" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[For each source path, execute the command
+
+ @return 0 if it runs successfully; -1 if it fails]]>
+ </doc>
+ </method>
+ <field name="args" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract class for the execution of a file system command]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Command -->
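+  <!-- Usage sketch (illustrative only): a minimal concrete Command. The
+       protected "args" field documented above holds the source paths that
+       runAll() resolves and feeds, one at a time, to run(Path).
+
+         import java.io.IOException;
+         import org.apache.hadoop.conf.Configuration;
+         import org.apache.hadoop.fs.Path;
+         import org.apache.hadoop.fs.shell.Command;
+
+         public class EchoCommand extends Command {
+           public EchoCommand(Configuration conf, String[] paths) {
+             super(conf);
+             this.args = paths;
+           }
+           public String getCommandName() { return "echo"; }
+           protected void run(Path path) throws IOException {
+             System.out.println(path);   // just echo each resolved path
+           }
+         }
+  -->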
+ <!-- start class org.apache.hadoop.fs.shell.CommandFormat -->
+ <class name="CommandFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CommandFormat" type="java.lang.String, int, int, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="parse" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="pos" type="int"/>
+ <doc>
+ <![CDATA[Parse parameters starting from the given position
+
+ @param args an array of input arguments
+      @param pos the position at which to start parsing
+ @return a list of parameters]]>
+ </doc>
+ </method>
+ <method name="getOpt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="option" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Return whether the option is set
+
+      @param option String representation of an option
+      @return true if the option is set; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Parse the args of a command and check the format of args.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.CommandFormat -->
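+  <!-- Usage sketch (illustrative only): parsing "-count -q /user/alice" with
+       the parse()/getOpt() methods above. The two int constructor arguments
+       are taken here to be the minimum and maximum parameter counts, which is
+       an assumption; this file records only their types.
+
+         import java.util.List;
+         import org.apache.hadoop.fs.shell.CommandFormat;
+
+         public class ParseDemo {
+           public static void main(String[] args) {
+             CommandFormat fmt = new CommandFormat("count", 1, 2, "q");
+             String[] argv = { "-count", "-q", "/user/alice" };
+             List<String> params = fmt.parse(argv, 1); // skip the command name
+             System.out.println(fmt.getOpt("q"));      // true
+             System.out.println(params);               // [/user/alice]
+           }
+         }
+  -->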
+ <!-- start class org.apache.hadoop.fs.shell.Count -->
+ <class name="Count" extends="org.apache.hadoop.fs.shell.Command"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Count" type="java.lang.String[], int, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param cmd the count command
+ @param pos the starting index of the arguments]]>
+ </doc>
+ </constructor>
+ <method name="matches" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Check if a command is the count command
+
+ @param cmd A string representation of a command starting with "-"
+ @return true if this is a count command; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USAGE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DESCRIPTION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Count the number of directories, files, bytes, quota, and remaining quota.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Count -->
+</package>
+<package name="org.apache.hadoop.http">
+ <!-- start interface org.apache.hadoop.http.FilterContainer -->
+ <interface name="FilterContainer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="addFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[Add a filter to the container.
+ @param name Filter name
+ @param classname Filter class name
+ @param parameters a map from parameter names to initial values]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A container class for javax.servlet.Filter.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.http.FilterContainer -->
+ <!-- start class org.apache.hadoop.http.FilterInitializer -->
+ <class name="FilterInitializer" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterInitializer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Initialize a javax.servlet.Filter.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.FilterInitializer -->
+ <!-- start class org.apache.hadoop.http.HttpServer -->
+ <class name="HttpServer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.http.FilterContainer"/>
+ <constructor name="HttpServer" type="java.lang.String, java.lang.String, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as this(name, bindAddress, port, findPort, null);]]>
+ </doc>
+ </constructor>
+ <constructor name="HttpServer" type="java.lang.String, java.lang.String, int, boolean, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a status server on the given port.
+ The jsp scripts are taken from src/webapps/<name>.
+      @param name The name of the server
+      @param bindAddress The address to bind to
+      @param port The port to use on the server
+ @param findPort whether the server should start at the given port and
+ increment by 1 until it finds a free port.
+ @param conf Configuration]]>
+ </doc>
+ </constructor>
+ <method name="addDefaultApps"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="appDir" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add default apps.
+ @param appDir The application directory
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="addDefaultServlets"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Add default servlets.]]>
+ </doc>
+ </method>
+ <method name="addContext"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="dir" type="java.lang.String"/>
+ <param name="isFiltered" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a context
+ @param pathSpec The path spec for the context
+ @param dir The directory containing the context
+ @param isFiltered if true, the servlet is added to the filter path mapping
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Set a value in the webapp context. These values are available to the jsp
+ pages as "application.getAttribute(name)".
+ @param name The name of the attribute
+ @param value The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="addServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="clazz" type="java.lang.Class&lt;? extends javax.servlet.http.HttpServlet&gt;"/>
+ <doc>
+ <![CDATA[Add a servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param clazz The servlet class]]>
+ </doc>
+ </method>
+ <method name="addInternalServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="this is a temporary method">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="clazz" type="java.lang.Class&lt;? extends javax.servlet.http.HttpServlet&gt;"/>
+ <doc>
+ <![CDATA[Add an internal servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param clazz The servlet class
+ @deprecated this is a temporary method]]>
+ </doc>
+ </method>
+ <method name="addFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="defineFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="ctx" type="org.mortbay.jetty.servlet.WebApplicationContext"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <param name="urls" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Define a filter for a context and set up default url mappings.]]>
+ </doc>
+ </method>
+ <method name="addFilterPathMapping"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pathSpec" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add the path spec to the filter path mapping.
+ @param pathSpec The path spec]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value in the webapp context.
+ @param name The name of the attribute
+ @return The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="getWebAppsPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the pathname to the webapps files.
+ @return the pathname as a URL
+ @throws IOException if 'webapps' directory cannot be found on CLASSPATH.]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the port that the server is on
+ @return the port]]>
+ </doc>
+ </method>
+ <method name="setThreads"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="min" type="int"/>
+ <param name="max" type="int"/>
+ </method>
+ <method name="addSslListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="keystore" type="java.lang.String"/>
+ <param name="storPass" type="java.lang.String"/>
+ <param name="keyPass" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Configure an SSL listener on the server.
+ @param addr address to listen on
+ @param keystore location of the keystore
+ @param storPass password for the keystore
+ @param keyPass password for the key]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start the server. Does not wait for the server to start.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+      <![CDATA[Stop the server.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="webServer" type="org.mortbay.jetty.Server"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="webAppContext" type="org.mortbay.jetty.servlet.WebApplicationContext"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="defaultContexts" type="java.util.Map&lt;org.mortbay.jetty.servlet.WebApplicationContext, java.lang.Boolean&gt;"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="findPort" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="listener" type="org.mortbay.http.SocketListener"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="filterNames" type="java.util.List&lt;java.lang.String&gt;"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Create a Jetty embedded server to answer http requests. The primary goal
+ is to serve up status information for the server.
+ There are three contexts:
+ "/logs/" -> points to the log directory
+ "/static/" -> points to common static files (src/webapps/static)
+ "/" -> the jsp server code from (src/webapps/<name>)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.HttpServer -->
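+  <!-- Usage sketch (illustrative only): starting a status server with the
+       four-argument constructor documented above; findPort = true probes
+       upward from the given port until a free one is found.
+
+         import org.apache.hadoop.http.HttpServer;
+
+         public class WebDemo {
+           public static void main(String[] args) throws Exception {
+             HttpServer server = new HttpServer("demo", "0.0.0.0", 50030, true);
+             server.setAttribute("started.at", System.currentTimeMillis());
+             server.start();   // returns without waiting for the server
+             System.out.println("listening on port " + server.getPort());
+             server.stop();
+           }
+         }
+  -->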
+ <!-- start class org.apache.hadoop.http.HttpServer.StackServlet -->
+ <class name="HttpServer.StackServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HttpServer.StackServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A very simple servlet to serve up a text representation of the current
+ stack traces. It both returns the stacks to the caller and logs them.
+      Currently the per-thread stack traces are captured sequentially, so they
+      need not form a single consistent snapshot.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.HttpServer.StackServlet -->
+</package>
+<package name="org.apache.hadoop.io">
+ <!-- start class org.apache.hadoop.io.AbstractMapWritable -->
+ <class name="AbstractMapWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="AbstractMapWritable"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="addToMap"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a Class to the maps if it is not already present.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="byte"/>
+ <doc>
+ <![CDATA[@return the Class object for the specified id]]>
+ </doc>
+ </method>
+ <method name="getId" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[@return the id for the specified Class]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Used by child copy constructors.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the conf]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@param conf the conf to set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract base class for MapWritable and SortedMapWritable
+
+ Unlike org.apache.nutch.crawl.MapWritable, this class allows creation of
+ MapWritable&lt;Writable, MapWritable&gt; so the CLASS_TO_ID and ID_TO_CLASS
+ maps travel with the class instead of being static.
+
+ Class ids range from 1 to 127 so there can be at most 127 distinct classes
+ in any specific map instance.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.AbstractMapWritable -->
+ <!-- start class org.apache.hadoop.io.ArrayFile -->
+ <class name="ArrayFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A dense file-based mapping from integers to values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Reader -->
+ <class name="ArrayFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an array reader for the named file.]]>
+ </doc>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader before its <code>n</code>th value.]]>
+ </doc>
+ </method>
+ <method name="next" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read and return the next value in the file.]]>
+ </doc>
+ </method>
+ <method name="key" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the key associated with the most recent call to {@link
+ #seek(long)}, {@link #next(Writable)}, or {@link
+ #get(long,Writable)}.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the <code>n</code>th value in the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Reader -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Writer -->
+ <class name="ArrayFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a value to the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Writer -->
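+ <!-- Editor's note: a write-then-read sketch built from the constructors
+      and methods listed above; the path and values are illustrative, and
+      the close() calls are inherited from the MapFile base classes and
+      assumed here.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.io.ArrayFile;
+      import org.apache.hadoop.io.IntWritable;
+
+      Configuration conf = new Configuration();
+      FileSystem fs = FileSystem.get(conf);
+
+      ArrayFile.Writer writer =
+          new ArrayFile.Writer(conf, fs, "/tmp/ints", IntWritable.class);
+      for (int i = 0; i < 100; i++) {
+        writer.append(new IntWritable(i * i));   // key is the implicit index
+      }
+      writer.close();
+
+      ArrayFile.Reader reader = new ArrayFile.Reader(fs, "/tmp/ints", conf);
+      IntWritable value = new IntWritable();
+      reader.get(7, value);                      // random access by index
+      reader.close();
+ -->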
+ <!-- start class org.apache.hadoop.io.ArrayWritable -->
+ <class name="ArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ArrayWritable" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for arrays containing instances of a class. The elements of this
+ writable must all be instances of the same class. If this writable will be
+ the input for a Reducer, you will need to create a subclass that sets the
+ value to be of the proper type.
+
+ For example:
+ <code>
+ public class IntArrayWritable extends ArrayWritable {
+ public IntArrayWritable() {
+ super(IntWritable.class);
+ }
+ }
+ </code>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayWritable -->
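+ <!-- Editor's note: a short usage sketch for the IntArrayWritable subclass
+      shown in the javadoc above; purely illustrative.
+
+      import org.apache.hadoop.io.IntWritable;
+      import org.apache.hadoop.io.Writable;
+
+      IntArrayWritable arr = new IntArrayWritable();
+      arr.set(new Writable[] { new IntWritable(1), new IntWritable(2) });
+      for (Writable w : arr.get()) {
+        System.out.println(((IntWritable) w).get());
+      }
+ -->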
+ <!-- start class org.apache.hadoop.io.BinaryComparable -->
+ <class name="BinaryComparable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable&lt;org.apache.hadoop.io.BinaryComparable&gt;"/>
+ <constructor name="BinaryComparable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLength" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return n such that bytes 0..n-1 from {@link #getBytes()} are valid.]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return representative byte array for this instance.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.BinaryComparable"/>
+ <doc>
+ <![CDATA[Compare bytes from {@link #getBytes()}.
+ @see org.apache.hadoop.io.WritableComparator#compareBytes(byte[],int,int,byte[],int,int)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Compare bytes from {@link #getBytes()} to those provided.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true if bytes from {@link #getBytes()} match.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a hash of the bytes returned from {@link #getBytes()}.
+ @see org.apache.hadoop.io.WritableComparator#hashBytes(byte[],int)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface supported by {@link org.apache.hadoop.io.WritableComparable}
+ types supporting ordering/permutation by a representative set of bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BinaryComparable -->
+ <!-- start class org.apache.hadoop.io.BooleanWritable -->
+ <class name="BooleanWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BooleanWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BooleanWritable" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="get" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the value of the BooleanWritable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for booleans.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable -->
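+ <!-- Editor's note: the Writable contract above boils down to a
+      write/readFields round trip; this sketch shows it with plain JDK
+      streams and is illustrative only.
+
+      import java.io.*;
+      import org.apache.hadoop.io.BooleanWritable;
+
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      BooleanWritable out = new BooleanWritable(true);
+      out.write(new DataOutputStream(bytes));        // serialize
+
+      BooleanWritable in = new BooleanWritable();
+      in.readFields(new DataInputStream(
+          new ByteArrayInputStream(bytes.toByteArray())));  // deserialize
+      assert in.get();                               // same value back
+ -->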
+ <!-- start class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <class name="BooleanWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BooleanWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BooleanWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.BytesWritable -->
+ <class name="BytesWritable" extends="org.apache.hadoop.io.BinaryComparable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.io.BinaryComparable&gt;"/>
+ <constructor name="BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-size sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="BytesWritable" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a BytesWritable using the byte array as the initial value.
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the BytesWritable.
+ @return The data; only the bytes between 0 and getLength() - 1 are valid.]]>
+ </doc>
+ </method>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #getBytes()} instead.">
+ <doc>
+ <![CDATA[Get the data from the BytesWritable.
+ @deprecated Use {@link #getBytes()} instead.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current size of the buffer.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #getLength()} instead.">
+ <doc>
+ <![CDATA[Get the current size of the buffer.
+ @deprecated Use {@link #getLength()} instead.]]>
+ </doc>
+ </method>
+ <method name="setSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Change the size of the buffer. The values in the old range are preserved
+ and any new values are undefined. The capacity is changed if
+ necessary.
+ @param size The new number of bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum size that could be handled without
+ resizing the backing storage.
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_cap" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved.
+ @param new_cap The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="org.apache.hadoop.io.BytesWritable"/>
+ <doc>
+ <![CDATA[Set the BytesWritable to the contents of the given newData.
+ @param newData the value to set this BytesWritable to.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Set the value to a copy of the given byte range
+ @param newData the new values to copy in
+ @param offset the offset in newData to start at
+ @param length the number of bytes to copy]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Are the two byte sequences equal?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generate the stream of bytes as hex pairs separated by ' '.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is usable as a key or value.
+ It is resizable and distinguishes between the size of the sequence and
+ the current capacity. The hash function is the front of the md5 of the
+ buffer. The sort order is the same as memcmp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable -->
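+ <!-- Editor's note: getBytes() above may return a backing array larger
+      than the logical contents, so callers must bound reads by
+      getLength(); a small illustrative sketch.
+
+      import org.apache.hadoop.io.BytesWritable;
+
+      BytesWritable bw = new BytesWritable(new byte[] {1, 2, 3});
+      bw.setSize(2);                        // logical size shrinks to 2
+      byte[] backing = bw.getBytes();       // may be longer than getLength()
+      for (int i = 0; i < bw.getLength(); i++) {
+        System.out.println(backing[i]);     // only indices 0..getLength()-1
+      }
+ -->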
+ <!-- start class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <class name="BytesWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BytesWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BytesWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ByteWritable -->
+ <class name="ByteWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="ByteWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ByteWritable" type="byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Set the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a ByteWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two ByteWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for a single byte.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable -->
+ <!-- start class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <class name="ByteWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ByteWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for ByteWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <!-- start interface org.apache.hadoop.io.Closeable -->
+ <interface name="Closeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="use java.io.Closeable">
+ <implements name="java.io.Closeable"/>
+ <doc>
+ <![CDATA[@deprecated use java.io.Closeable]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Closeable -->
+ <!-- start class org.apache.hadoop.io.CompressedWritable -->
+ <class name="CompressedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="CompressedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ensureInflated"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Must be called by all methods which access fields to ensure that the data
+ has been uncompressed.]]>
+ </doc>
+ </method>
+ <method name="readFieldsCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #readFields(DataInput)}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #write(DataOutput)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base-class for Writables which store themselves compressed and lazily
+ inflate on field access. This is useful for large objects whose fields are
+ not altered during a map or reduce operation: leaving the field data
+ compressed makes copying the instance from one file to another much
+ faster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.CompressedWritable -->
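+ <!-- Editor's note: a minimal subclass sketch, assuming only the protected
+      hooks listed above; the field name and accessor are invented for
+      illustration.
+
+      import java.io.*;
+      import org.apache.hadoop.io.CompressedWritable;
+
+      public class BigRecord extends CompressedWritable {
+        private String payload = "";            // hypothetical field
+
+        protected void readFieldsCompressed(DataInput in) throws IOException {
+          payload = in.readUTF();               // invoked lazily on access
+        }
+
+        protected void writeCompressed(DataOutput out) throws IOException {
+          out.writeUTF(payload);
+        }
+
+        public String getPayload() {
+          ensureInflated();                     // required before field access
+          return payload;
+        }
+      }
+ -->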
+ <!-- start class org.apache.hadoop.io.DataInputBuffer -->
+ <class name="DataInputBuffer" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataInputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataInput} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataInputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataInputBuffer buffer = new DataInputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using DataInput methods ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataInputBuffer -->
+ <!-- start class org.apache.hadoop.io.DataOutputBuffer -->
+ <class name="DataOutputBuffer" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataOutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <constructor name="DataOutputBuffer" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from a DataInput directly into the buffer.]]>
+ </doc>
+ </method>
+ <method name="writeTo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the buffered data to the given output stream.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataOutput} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataOutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataOutputBuffer buffer = new DataOutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using DataOutput methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataOutputBuffer -->
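+ <!-- Editor's note: a concrete version of the two pseudocode snippets
+      above, pairing DataOutputBuffer with DataInputBuffer; illustrative.
+
+      import org.apache.hadoop.io.DataInputBuffer;
+      import org.apache.hadoop.io.DataOutputBuffer;
+      import org.apache.hadoop.io.IntWritable;
+
+      DataOutputBuffer out = new DataOutputBuffer();
+      new IntWritable(42).write(out);           // serialize into the buffer
+
+      DataInputBuffer in = new DataInputBuffer();
+      in.reset(out.getData(), out.getLength()); // only getLength() bytes valid
+
+      IntWritable copy = new IntWritable();
+      copy.readFields(in);                      // copy.get() == 42
+ -->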
+ <!-- start class org.apache.hadoop.io.DefaultStringifier -->
+ <class name="DefaultStringifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Stringifier&lt;T&gt;"/>
+ <constructor name="DefaultStringifier" type="org.apache.hadoop.conf.Configuration, java.lang.Class&lt;T&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="store"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="item" type="K"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the item in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to store
+ @param item the object to be stored
+ @param keyName the name of the key to use
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="load" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="storeArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="items" type="K[]"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the array of items in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param items the objects to be stored
+ @param keyName the name of the key to use
+ @throws IndexOutOfBoundsException if the items array is empty
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="loadArray" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class&lt;K&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the array of objects from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[DefaultStringifier is the default implementation of the {@link Stringifier}
+ interface which stringifies the objects using base64 encoding of the
+ serialized version of the objects. The {@link Serializer} and
+ {@link Deserializer} are obtained from the {@link SerializationFactory}.
+ <br>
+ DefaultStringifier offers convenience methods to store/load objects to/from
+ the configuration.
+
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DefaultStringifier -->
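+ <!-- Editor's note: a sketch of the static convenience methods documented
+      above; the key name is an arbitrary example.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.DefaultStringifier;
+      import org.apache.hadoop.io.Text;
+
+      Configuration conf = new Configuration();
+      DefaultStringifier.store(conf, new Text("hello"), "my.key");
+
+      Text restored =
+          DefaultStringifier.load(conf, "my.key", Text.class);   // "hello"
+ -->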
+ <!-- start class org.apache.hadoop.io.DoubleWritable -->
+ <class name="DoubleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="DoubleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DoubleWritable" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="double"/>
+ </method>
+ <method name="get" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a DoubleWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Writable for Double values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable -->
+ <!-- start class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <class name="DoubleWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DoubleWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for DoubleWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.FloatWritable -->
+ <class name="FloatWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="FloatWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FloatWritable" type="float"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="float"/>
+ <doc>
+ <![CDATA[Set the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a FloatWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two FloatWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for floats.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable -->
+ <!-- start class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <class name="FloatWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FloatWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for FloatWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.GenericWritable -->
+ <class name="GenericWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="GenericWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Set the instance that is wrapped.
+
+ @param obj the Writable instance to wrap]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the wrapped instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTypes" return="java.lang.Class[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return all classes that may be wrapped. Subclasses should implement this
+ to return a constant array of classes.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper for Writable instances.
+ <p>
+ When two sequence files, which have the same Key type but different Value
+ types, are mapped out to reduce, multiple Value types are not allowed.
+ In this case, this class can help you wrap instances with different types.
+ </p>
+
+ <p>
+ Compared with <code>ObjectWritable</code>, this class is much more efficient,
+ because <code>ObjectWritable</code> appends the class declaration as a String
+ to the output file with every Key-Value pair.
+ </p>
+
+ <p>
+ Generic Writable implements {@link Configurable} interface, so that it will be
+ configured by the framework. The configuration is passed to the wrapped objects
+ implementing {@link Configurable} interface <i>before deserialization</i>.
+ </p>
+
+ How to use it: <br>
+ 1. Write your own class, such as GenericObject, which extends GenericWritable.<br>
+ 2. Implement the abstract method <code>getTypes()</code>, defining
+ the classes that will be wrapped in GenericObject in the application.
+ Note: the classes defined in the <code>getTypes()</code> method must
+ implement the <code>Writable</code> interface.
+ <br><br>
+
+ The code looks like this:
+ <blockquote><pre>
+ public class GenericObject extends GenericWritable {
+
+ private static Class[] CLASSES = {
+ ClassType1.class,
+ ClassType2.class,
+ ClassType3.class,
+ };
+
+ protected Class[] getTypes() {
+ return CLASSES;
+ }
+
+ }
+ </pre></blockquote>
+
+ @since Nov 8, 2006]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.GenericWritable -->
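+ <!-- Editor's note: a usage sketch for the GenericObject subclass shown in
+      the javadoc above; ClassType1 stands in for any Writable listed in
+      getTypes().
+
+      GenericObject wrapper = new GenericObject();
+      wrapper.set(new ClassType1());        // must be one of getTypes()
+      // ... write(out) / readFields(in) as with any Writable ...
+      ClassType1 unwrapped = (ClassType1) wrapper.get();
+ -->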
+ <!-- start class org.apache.hadoop.io.InputBuffer -->
+ <class name="InputBuffer" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link InputStream} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new InputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ InputBuffer buffer = new InputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using InputStream methods ...
+ }
+ </pre>
+ @see DataInputBuffer
+ @see DataOutput]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.InputBuffer -->
+ <!-- start class org.apache.hadoop.io.IntWritable -->
+ <class name="IntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="IntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="IntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an IntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two IntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for ints.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable -->
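+ <!-- Editor's note: a minimal round-trip sketch for IntWritable, using only the
+      write/readFields/get methods recorded above. The value 42 is illustrative.
+
+      import java.io.*;
+      import org.apache.hadoop.io.IntWritable;
+
+      public class IntWritableExample {
+        public static void main(String[] args) throws IOException {
+          IntWritable out = new IntWritable(42);
+          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+          out.write(new DataOutputStream(bytes));       // serialize to the wire format
+
+          IntWritable in = new IntWritable();
+          in.readFields(new DataInputStream(
+              new ByteArrayInputStream(bytes.toByteArray())));
+          System.out.println(in.get());                 // prints 42
+        }
+      }
+ -->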
+ <!-- start class org.apache.hadoop.io.IntWritable.Comparator -->
+ <class name="IntWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IntWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for IntWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.IOUtils -->
+ <class name="IOUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="buffSize" type="int"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param buffSize the size of the buffer
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another. <strong>Closes the input and output
+ streams at the end</strong>.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads len bytes in a loop.
+ @param in The InputStream to read from
+ @param buf The buffer to fill
+ @param off the offset in the buffer at which to start filling
+ @param len the number of bytes to read
+ @throws IOException if it could not read the requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Similar to readFully(). Skips bytes in a loop.
+ @param in The InputStream to skip bytes from
+ @param len number of bytes to skip.
+ @throws IOException if it could not skip the requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="closeables" type="java.io.Closeable[]"/>
+ <doc>
+ <![CDATA[Close the Closeable objects and <b>ignore</b> any {@link IOException} or
+ null pointers. Must only be used for cleanup in exception handlers.
+ @param log the log to record problems to at debug level. Can be null.
+ @param closeables the objects to close]]>
+ </doc>
+ </method>
+ <method name="closeStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Closeable"/>
+ <doc>
+ <![CDATA[Closes the stream, ignoring any {@link IOException}.
+ Must only be called when cleaning up from exception handlers.
+ @param stream the Stream to close]]>
+ </doc>
+ </method>
+ <method name="closeSocket"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <doc>
+ <![CDATA[Closes the socket, ignoring any {@link IOException}.
+ @param sock the Socket to close]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility class for I/O-related functionality.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils -->
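+ <!-- Editor's note: a short sketch of the two most common IOUtils calls recorded
+      above. The in-memory streams and 4096-byte buffer size are illustrative.
+
+      import java.io.*;
+      import org.apache.hadoop.io.IOUtils;
+
+      public class IOUtilsExample {
+        public static void main(String[] args) throws IOException {
+          InputStream in = new ByteArrayInputStream("hello".getBytes());
+          ByteArrayOutputStream out = new ByteArrayOutputStream();
+          // 'true' closes both streams in a finally clause, per the doc above
+          IOUtils.copyBytes(in, out, 4096, true);
+          System.out.println(out.toString());
+
+          // closeStream is for exception-handler cleanup; IOExceptions are ignored
+          IOUtils.closeStream(in);
+        }
+      }
+ -->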
+ <!-- start class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <class name="IOUtils.NullOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils.NullOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[/dev/null of OutputStreams.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <!-- start class org.apache.hadoop.io.LongWritable -->
+ <class name="LongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="LongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a LongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two LongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable -->
+ <!-- start class org.apache.hadoop.io.LongWritable.Comparator -->
+ <class name="LongWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <class name="LongWritable.DecreasingComparator" extends="org.apache.hadoop.io.LongWritable.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.DecreasingComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A decreasing Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
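+ <!-- Editor's note: a sketch of sorting in decreasing order with the comparator
+      recorded above. Its object-level compare(WritableComparable, WritableComparable)
+      overload makes it usable with java.util.Arrays.sort (with an unchecked-cast
+      warning, since WritableComparator is a raw Comparator); values are illustrative.
+
+      import java.util.Arrays;
+      import org.apache.hadoop.io.LongWritable;
+
+      public class DecreasingSortExample {
+        public static void main(String[] args) {
+          LongWritable[] values = {
+            new LongWritable(3L), new LongWritable(7L), new LongWritable(1L)
+          };
+          Arrays.sort(values, new LongWritable.DecreasingComparator());
+          System.out.println(Arrays.toString(values));   // [7, 3, 1]
+        }
+      }
+ -->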
+ <!-- start class org.apache.hadoop.io.MapFile -->
+ <class name="MapFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="oldName" type="java.lang.String"/>
+ <param name="newName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames an existing map directory.]]>
+ </doc>
+ </method>
+ <method name="delete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deletes the named map file.]]>
+ </doc>
+ </method>
+ <method name="fix" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"/>
+ <param name="valueClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"/>
+ <param name="dryrun" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This method attempts to fix a corrupt MapFile by re-creating its index.
+ @param fs filesystem
+ @param dir directory containing the MapFile data and index
+ @param keyClass key class (has to be a subclass of Writable)
+ @param valueClass value class (has to be a subclass of Writable)
+ @param dryrun do not perform any changes, just report what needs to be done
+ @return number of valid entries in this MapFile, or -1 if no fixing was needed
+ @throws Exception]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="INDEX_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the index file.]]>
+ </doc>
+ </field>
+ <field name="DATA_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the data file.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A file-based map from keys to values.
+
+ <p>A map is a directory containing two files, the <code>data</code> file,
+ containing all keys and values in the map, and a smaller <code>index</code>
+ file, containing a fraction of the keys. The fraction is determined by
+ {@link Writer#getIndexInterval()}.
+
+ <p>The index file is read entirely into memory. Thus key implementations
+ should try to keep themselves small.
+
+ <p>Map files are created by adding entries in-order. To maintain a large
+ database, perform updates by copying the previous version of a database and
+ merging in a sorted change list, to create a new version of the database in
+ a new file. Sorting large change lists can be done with {@link
+ SequenceFile.Sorter}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile -->
+ <!-- start class org.apache.hadoop.io.MapFile.Reader -->
+ <class name="MapFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map using the named comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Hook to allow subclasses to defer opening streams until further
+ initialization is complete.
+ @see #createDataFileReader(FileSystem, Path, Configuration)]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="open"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dirName" type="java.lang.String"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDataFileReader" return="org.apache.hadoop.io.SequenceFile.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dataFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link SequenceFile.Reader} returned.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Re-positions the reader before its first key.]]>
+ </doc>
+ </method>
+ <method name="midKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the key at approximately the middle of the file.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalKey"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the final key from the file.
+
+ @param key key to read into]]>
+ </doc>
+ </method>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader at the named key, or, if no such key exists, at the
+ first entry after the named key. Returns true iff the named key exists
+ in this map.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the map into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ the end of the map.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the value for the named key, or null if none exists.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+ Returns <code>key</code> if it exists or, if it does not, the first entry
+ after the named key.
+
+ @param key the key we're trying to find
+ @param val the data value if the key is found
+ @return the key that was the closest match, or null if eof.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <param name="before" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+
+ @param key the key we're trying to find
+ @param val the data value if the key is found
+ @param before if true, and <code>key</code> does not exist, return
+ the first entry that falls just before <code>key</code>; otherwise,
+ return the record that sorts just after.
+ @return the key that was the closest match, or null if eof.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Reader -->
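+ <!-- Editor's note: a self-contained sketch of random access and scanning with
+      MapFile.Reader. It first builds a tiny map with MapFile.Writer (recorded
+      just below) and assumes org.apache.hadoop.io.Text as the key type; the
+      local-filesystem path /tmp/example.map is illustrative.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.io.*;
+
+      public class MapFileReadExample {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.getLocal(conf);
+          String dir = "/tmp/example.map";
+
+          MapFile.Writer writer =
+            new MapFile.Writer(conf, fs, dir, Text.class, IntWritable.class);
+          writer.append(new Text("a"), new IntWritable(1));  // keys in sorted order
+          writer.append(new Text("b"), new IntWritable(2));
+          writer.close();
+
+          MapFile.Reader reader = new MapFile.Reader(fs, dir, conf);
+          IntWritable val = new IntWritable();
+          if (reader.get(new Text("b"), val) != null) {      // random access by key
+            System.out.println("b = " + val);
+          }
+          reader.reset();                                    // back before the first key
+          Text key = new Text();
+          while (reader.next(key, val)) {                    // ordered scan
+            System.out.println(key + " = " + val);
+          }
+          reader.close();
+        }
+      }
+ -->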
+ <!-- start class org.apache.hadoop.io.MapFile.Writer -->
+ <class name="MapFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <method name="getIndexInterval" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of entries that are added before an index entry is added.]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval and stores it in <code>conf</code>.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair to the map. The key must be greater than or equal
+ to the previous key added to the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writes a new map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Writer -->
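+ <!-- Editor's note: a sketch of the append-ordering contract noted above; keys
+      appended out of order are rejected with an IOException. The path, key type
+      (Text), and values are illustrative.
+
+      import java.io.IOException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.io.*;
+
+      public class MapFileOrderExample {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          FileSystem fs = FileSystem.getLocal(conf);
+          MapFile.Writer writer =
+            new MapFile.Writer(conf, fs, "/tmp/order.map", Text.class, IntWritable.class);
+          writer.append(new Text("b"), new IntWritable(2));
+          try {
+            writer.append(new Text("a"), new IntWritable(1));   // out of order
+          } catch (IOException expected) {
+            System.out.println("rejected: " + expected.getMessage());
+          } finally {
+            writer.close();
+          }
+        }
+      }
+ -->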
+ <!-- start class org.apache.hadoop.io.MapWritable -->
+ <class name="MapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Map&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapWritable" type="org.apache.hadoop.io.MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.Writable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable Map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapWritable -->
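+ <!-- Editor's note: a round-trip sketch for MapWritable, using only the Map methods
+      and write/readFields recorded above. Key and value contents are illustrative.
+
+      import java.io.*;
+      import org.apache.hadoop.io.*;
+
+      public class MapWritableExample {
+        public static void main(String[] args) throws IOException {
+          MapWritable map = new MapWritable();
+          map.put(new Text("answer"), new IntWritable(42));   // any Writable key/value
+
+          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+          map.write(new DataOutputStream(bytes));             // serialize
+
+          MapWritable copy = new MapWritable();
+          copy.readFields(new DataInputStream(
+              new ByteArrayInputStream(bytes.toByteArray())));
+          System.out.println(copy.get(new Text("answer")));   // prints 42
+        }
+      }
+ -->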
+ <!-- start class org.apache.hadoop.io.MD5Hash -->
+ <class name="MD5Hash" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.io.MD5Hash&gt;"/>
+ <constructor name="MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash from a hex string.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash with a specified value.]]>
+ </doc>
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs, reads and returns an instance.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Copy the contents of another instance into this instance.]]>
+ </doc>
+ </method>
+ <method name="getDigest" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the digest bytes.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a hash value for the content from the InputStream.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Construct a hash value for a UTF8 string.]]>
+ </doc>
+ </method>
+ <method name="halfDigest" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a half-sized version of this MD5. Fits in a long.]]>
+ </doc>
+ </method>
+ <method name="quarterDigest" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a 32-bit digest of the MD5.
+ @return the first 4 bytes of the MD5]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an MD5Hash whose digest contains the
+ same values.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for this object.
+ Only uses the first 4 bytes, since MD5s are evenly distributed.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Compares this object with the specified object for order.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="setDigest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the digest value from a hex string.]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Writable for MD5 hash values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash -->
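+ <!-- Editor's note: a short sketch of the MD5Hash helpers recorded above. It
+      assumes toString() yields the hex form accepted by the MD5Hash(String)
+      constructor; the input bytes are illustrative.
+
+      import org.apache.hadoop.io.MD5Hash;
+
+      public class MD5HashExample {
+        public static void main(String[] args) {
+          MD5Hash hash = MD5Hash.digest("hello".getBytes());
+          System.out.println(hash);                  // 32-character hex string
+          System.out.println(hash.quarterDigest());  // first 4 bytes as an int
+
+          MD5Hash copy = new MD5Hash(hash.toString());   // rebuild from the hex form
+          System.out.println(copy.equals(hash));         // true
+        }
+      }
+ -->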
+ <!-- start class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <class name="MD5Hash.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5Hash.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for MD5Hash keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <!-- start class org.apache.hadoop.io.MultipleIOException -->
+ <class name="MultipleIOException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getExceptions" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the underlying exceptions]]>
+ </doc>
+ </method>
+ <method name="createIOException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="exceptions" type="java.util.List&lt;java.io.IOException&gt;"/>
+ <doc>
+ <![CDATA[A convenient method to create an {@link IOException}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Encapsulates a list of {@link IOException}s in a single {@link IOException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MultipleIOException -->
+ <!-- start class org.apache.hadoop.io.NullWritable -->
+ <class name="NullWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <method name="get" return="org.apache.hadoop.io.NullWritable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the single instance of this class.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Singleton Writable with no data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable -->
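+ <!-- Editor's note: NullWritable is typically used as a placeholder when a key or
+      value slot is unused (for example, in a SequenceFile that stores only keys).
+      A minimal sketch:
+
+      import org.apache.hadoop.io.NullWritable;
+
+      public class NullWritableExample {
+        public static void main(String[] args) {
+          NullWritable nothing = NullWritable.get();   // the singleton; no public constructor
+          // write()/readFields() move no bytes, so it costs nothing on the wire
+          System.out.println(nothing);
+        }
+      }
+ -->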
+ <!-- start class org.apache.hadoop.io.NullWritable.Comparator -->
+ <class name="NullWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator &quot;optimized&quot; for NullWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ObjectWritable -->
+ <class name="ObjectWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ObjectWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Class, java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the instance, or null if none.]]>
+ </doc>
+ </method>
+ <method name="getDeclaredClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the declared class of the instance.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Reset the instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeObject"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="instance" type="java.lang.Object"/>
+ <param name="declaredClass" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="objectWritable" type="org.apache.hadoop.io.ObjectWritable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A polymorphic Writable that writes an instance with its class name.
+ Handles arrays, strings and primitive types without a Writable wrapper.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ObjectWritable -->
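+ <!-- Editor's note: a sketch of the static writeObject/readObject pair recorded
+      above, which handles Strings without a Writable wrapper. The payload string
+      is illustrative.
+
+      import java.io.*;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.ObjectWritable;
+
+      public class ObjectWritableExample {
+        public static void main(String[] args) throws IOException {
+          Configuration conf = new Configuration();
+
+          // Write a String polymorphically: the class name travels with the value.
+          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+          ObjectWritable.writeObject(new DataOutputStream(bytes),
+                                     "forty-two", String.class, conf);
+
+          // Read it back without knowing the concrete type in advance.
+          DataInputStream in = new DataInputStream(
+              new ByteArrayInputStream(bytes.toByteArray()));
+          Object value = ObjectWritable.readObject(in, conf);
+          System.out.println(value);   // forty-two
+        }
+      }
+ -->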
+ <!-- start class org.apache.hadoop.io.OutputBuffer -->
+ <class name="OutputBuffer" extends="java.io.FilterOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.OutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from an InputStream directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link OutputStream} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new OutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ OutputBuffer buffer = new OutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using OutputStream methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>
+ @see DataOutputBuffer
+ @see InputBuffer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.OutputBuffer -->
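+ <!-- Editor's note: a minimal Java sketch of the reuse pattern described in the
+      OutputBuffer javadoc above. It assumes the OutputStream write(byte[]) method
+      inherited by OutputBuffer; the record contents are illustrative.
+
+      import java.io.IOException;
+      import org.apache.hadoop.io.OutputBuffer;
+
+      public class OutputBufferExample {
+        public static void main(String[] args) throws IOException {
+          OutputBuffer buffer = new OutputBuffer();    // created once, reused below
+          String[] records = { "first", "second" };
+          for (String record : records) {
+            buffer.reset();                            // empty it without reallocating
+            buffer.write(record.getBytes());           // plain OutputStream writes
+            byte[] data = buffer.getData();            // backing array, valid to getLength()
+            int dataLength = buffer.getLength();
+            System.out.println(new String(data, 0, dataLength));
+          }
+        }
+      }
+ -->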
+ <!-- start interface org.apache.hadoop.io.RawComparator -->
+ <interface name="RawComparator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Comparator&lt;T&gt;"/>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link Comparator} that operates directly on byte representations of
+ objects.
+ </p>
+ @param <T>
+ @see DeserializerComparator]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.RawComparator -->
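+ <!-- Editor's note: the RawComparator contract above is what the Comparator
+      classes recorded earlier (e.g. IntWritable.Comparator) implement; it compares
+      serialized bytes without deserializing either operand. A sketch, with
+      illustrative values:
+
+      import java.io.*;
+      import org.apache.hadoop.io.IntWritable;
+
+      public class RawCompareExample {
+        static byte[] serialize(IntWritable w) throws IOException {
+          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+          w.write(new DataOutputStream(bytes));
+          return bytes.toByteArray();
+        }
+
+        public static void main(String[] args) throws IOException {
+          byte[] a = serialize(new IntWritable(5));
+          byte[] b = serialize(new IntWritable(9));
+          IntWritable.Comparator raw = new IntWritable.Comparator();
+          // Negative result: 5 sorts before 9, decided straight from the bytes
+          System.out.println(raw.compare(a, 0, a.length, b, 0, b.length));
+        }
+      }
+ -->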
+ <!-- start class org.apache.hadoop.io.SequenceFile -->
+ <class name="SequenceFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the compression type for the reduce outputs.
+ @param job the job config to look in
+ @return the kind of compression to use
+ @deprecated Use
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use the one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.
+ or">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the compression type for sequence files.
+ @param job the configuration to modify
+ @param val the new compression type (none, block, record)
+ @deprecated Use one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param bufferSize buffer size for the underlying output stream.
+ @param replication replication factor for the file.
+ @param blockSize block size for the file.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="SYNC_INTERVAL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes between sync points.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<code>SequenceFile</code>s are flat files consisting of binary key/value
+ pairs.
+
+ <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
+ {@link Sorter} classes for writing, reading and sorting respectively.</p>
+
+ There are three <code>SequenceFile</code> <code>Writer</code>s based on the
+ {@link CompressionType} used to compress key/value pairs:
+ <ol>
+ <li>
+ <code>Writer</code> : Uncompressed records.
+ </li>
+ <li>
+ <code>RecordCompressWriter</code> : Record-compressed files, only compress
+ values.
+ </li>
+ <li>
+ <code>BlockCompressWriter</code> : Block-compressed files, both keys and
+ values are collected in 'blocks'
+ separately and compressed. The size of
+ the 'block' is configurable.
+ </li>
+ </ol>
+
+ <p>The actual compression algorithm used to compress key and/or values can be
+ specified by using the appropriate {@link CompressionCodec}.</p>
+
+ <p>The recommended way is to use the static <tt>createWriter</tt> methods
+ provided by <code>SequenceFile</code> to choose the preferred format.</p>
+
+ <p>The {@link Reader} acts as the bridge and can read any of the above
+ <code>SequenceFile</code> formats.</p>
+
+ <h4 id="Formats">SequenceFile Formats</h4>
+
+ <p>Essentially there are three different formats for <code>SequenceFile</code>s
+ depending on the <code>CompressionType</code> specified. All of them share a
+ <a href="#Header">common header</a> described below.</p>
+
+ <h5 id="Header">SequenceFile Header</h5>
+ <ul>
+ <li>
+ version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual
+ version number (e.g. SEQ4 or SEQ6)
+ </li>
+ <li>
+ keyClassName - key class
+ </li>
+ <li>
+ valueClassName - value class
+ </li>
+ <li>
+ compression - A boolean which specifies if compression is turned on for
+ keys/values in this file.
+ </li>
+ <li>
+ blockCompression - A boolean which specifies if block-compression is
+ turned on for keys/values in this file.
+ </li>
+ <li>
+ compression codec - <code>CompressionCodec</code> class which is used for
+ compression of keys and/or values (if compression is
+ enabled).
+ </li>
+ <li>
+ metadata - {@link Metadata} for this file.
+ </li>
+ <li>
+ sync - A sync marker to denote end of the header.
+ </li>
+ </ul>
+
+ <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li>Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li><i>Compressed</i> Value</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record <i>Block</i>
+ <ul>
+ <li>Compressed key-lengths block-size</li>
+ <li>Compressed key-lengths block</li>
+ <li>Compressed keys block-size</li>
+ <li>Compressed keys block</li>
+ <li>Compressed value-lengths block-size</li>
+ <li>Compressed value-lengths block</li>
+ <li>Compressed values block-size</li>
+ <li>Compressed values block</li>
+ </ul>
+ </li>
+ <li>
+ A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <p>The compressed blocks of key lengths and value lengths consist of the
+ actual lengths of individual keys/values encoded in ZeroCompressedInteger
+ format.</p>
+
+ @see CompressionCodec]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile -->
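+ <!-- Usage sketch: a round trip through the createWriter/Reader API
+ described above. A minimal sketch; the path /tmp/example.seq and the
+ Text/IntWritable key and value types are illustrative choices, and the
+ Reader detects the format (uncompressed, record or block compressed)
+ from the common header.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.SequenceFile;
+ import org.apache.hadoop.io.Text;
+
+ public class SequenceFileRoundTrip {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     FileSystem fs = FileSystem.get(conf);
+     Path file = new Path("/tmp/example.seq");
+
+     // Let createWriter pick the preferred Writer for the compression type.
+     SequenceFile.Writer writer = SequenceFile.createWriter(
+         fs, conf, file, Text.class, IntWritable.class,
+         SequenceFile.CompressionType.RECORD);
+     try {
+       writer.append(new Text("one"), new IntWritable(1));
+       writer.append(new Text("two"), new IntWritable(2));
+     } finally {
+       writer.close();
+     }
+
+     SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
+     try {
+       Text key = new Text();
+       IntWritable val = new IntWritable();
+       while (reader.next(key, val)) {
+         System.out.println(key + " = " + val);
+       }
+     } finally {
+       reader.close();
+     }
+   }
+ }
+ -->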
+ <!-- start class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <class name="SequenceFile.CompressionType" extends="java.lang.Enum&lt;org.apache.hadoop.io.SequenceFile.CompressionType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.SequenceFile.CompressionType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression type used to compress key/value pairs in the
+ {@link SequenceFile}.
+
+ @see SequenceFile.Writer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.Metadata -->
+ <class name="SequenceFile.Metadata" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFile.Metadata" type="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="getMetadata" return="java.util.TreeMap&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The class encapsulating the metadata of a file.
+ The metadata of a file is a list of attribute name/value
+ pairs of Text type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Metadata -->
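+ <!-- Usage sketch: attaching Text attributes to a file header. A minimal
+ sketch; the attribute name "created-by" is a hypothetical example, and the
+ metadata would normally be passed to one of the createWriter overloads
+ above, then recovered on the read side with Reader.getMetadata().
+
+ import org.apache.hadoop.io.SequenceFile;
+ import org.apache.hadoop.io.Text;
+
+ public class MetadataExample {
+   public static void main(String[] args) {
+     SequenceFile.Metadata meta = new SequenceFile.Metadata();
+     // Attributes are plain Text name/value pairs.
+     meta.set(new Text("created-by"), new Text("example-tool"));
+     System.out.println(meta.get(new Text("created-by")));
+   }
+ }
+ -->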
+ <!-- start class org.apache.hadoop.io.SequenceFile.Reader -->
+ <class name="SequenceFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Reader" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the named file.]]>
+ </doc>
+ </constructor>
+ <method name="openFile" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link FSDataInputStream} returned.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the key class.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the value class.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="isCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if values are compressed.]]>
+ </doc>
+ </method>
+ <method name="isBlockCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if records are block-compressed.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="getMetadata" return="org.apache.hadoop.io.SequenceFile.Metadata"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the metadata object of the file.]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file into <code>key</code>, skipping its
+ value. Returns true if another entry exists, and false at end of file.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the file into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ end of file.]]>
+ </doc>
+ </method>
+ <method name="next" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.">
+ <param name="buffer" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.]]>
+ </doc>
+ </method>
+ <method name="createValueBytes" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRaw" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' records.
+ @param key - The buffer into which the key is read
+ @param val - The 'raw' value
+ @return Returns the total record length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawKey" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' keys.
+ @param key - The buffer into which the key is read
+ @return Returns the key length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file, skipping its
+ value. Return null at end of file.]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' values.
+ @param val - The 'raw' value
+ @return Returns the value length
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the current byte position in the input file.
+
+ <p>The position passed must be a position returned by {@link
+ SequenceFile.Writer#getLength()} when writing this file. To seek to an arbitrary
+ position, use {@link SequenceFile.Reader#sync(long)}.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the next sync mark past a given position.]]>
+ </doc>
+ </method>
+ <method name="syncSeen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true iff the previous call to next passed a sync mark.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current byte position in the input file.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reads key/value pairs from a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Reader -->
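+ <!-- Usage sketch: seeking by sync marker. A minimal sketch, assuming
+ /tmp/example.seq was written earlier with Text keys and IntWritable
+ values; the offset 1024 is an arbitrary byte position. seek(long) only
+ accepts positions handed out by the Writer, so sync(long) is the way to
+ start reading from an arbitrary offset.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.SequenceFile;
+ import org.apache.hadoop.io.Text;
+
+ public class ReaderSyncExample {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     FileSystem fs = FileSystem.get(conf);
+     SequenceFile.Reader reader =
+         new SequenceFile.Reader(fs, new Path("/tmp/example.seq"), conf);
+     try {
+       reader.sync(1024); // advance to the next sync mark past byte 1024
+       Text key = new Text();
+       IntWritable val = new IntWritable();
+       while (reader.next(key, val)) {
+         if (reader.syncSeen()) { // the previous next() crossed a sync mark
+           System.out.println("sync mark before " + reader.getPosition());
+         }
+         System.out.println(key + " = " + val);
+       }
+     } finally {
+       reader.close();
+     }
+   }
+ }
+ -->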
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter -->
+ <class name="SequenceFile.Sorter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge files containing the named classes.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.io.RawComparator, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge using an arbitrary {@link RawComparator}.]]>
+ </doc>
+ </constructor>
+ <method name="setFactor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="factor" type="int"/>
+ <doc>
+ <![CDATA[Set the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="getFactor" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="setMemory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="memory" type="int"/>
+ <doc>
+ <![CDATA[Set the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="getMemory" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setProgressable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progressable" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Set the progressable object in order to report progress.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files into an output file.
+ @param inFiles the files to be sorted
+ @param outFile the sorted output file
+ @param deleteInput should the input files be deleted as they are read?]]>
+ </doc>
+ </method>
+ <method name="sortAndIterate" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files and return an iterator.
+ @param inFiles the files to be sorted
+ @param tempDir the directory where temp files are created during sort
+ @param deleteInput should the input files be deleted as they are read?
+ @return the RawKeyValueIterator]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The backwards compatible interface to sort.
+ @param inFile the input file to sort
+ @param outFile the sorted output file]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="segments" type="java.util.List&lt;org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor&gt;"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the list of segments of type <code>SegmentDescriptor</code>
+ @param segments the list of SegmentDescriptors
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of the files passed in <code>Path[]</code> using the
+ merge factor that has already been set
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="factor" type="int"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of the files passed in <code>Path[]</code>
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param factor the factor that will be used as the maximum merge fan-in
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInputs" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merges the contents of the files passed in <code>Path[]</code>
+ @param inNames the array of path names
+ @param tempDir the directory for creating temp files during merge
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cloneFileAttributes" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="prog" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clones the attributes (like compression) of the input file and creates a
+ corresponding Writer
+ @param inputFile the path of the input file whose attributes should be
+ cloned
+ @param outputFile the path of the output file
+ @param prog the Progressable to report status during the file write
+ @return Writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="records" type="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"/>
+ <param name="writer" type="org.apache.hadoop.io.SequenceFile.Writer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes records from RawKeyValueIterator into a file represented by the
+ passed writer
+ @param records the RawKeyValueIterator
+ @param writer the Writer created earlier
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merge the provided files.
+ @param inFiles the array of input path names
+ @param outFile the final output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sorts key/value pairs in a sequence-format file.
+
+ <p>For best performance, applications should make sure that the {@link
+ Writable#readFields(DataInput)} implementation of their keys is
+ very efficient. In particular, it should avoid allocating memory.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter -->
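+ <!-- Usage sketch: sorting sequence files with the Sorter described above.
+ A minimal sketch; the input/output paths are hypothetical, and the factor
+ and memory settings are illustrative rather than recommended values.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.SequenceFile;
+ import org.apache.hadoop.io.Text;
+
+ public class SorterExample {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     FileSystem fs = FileSystem.get(conf);
+
+     // Sort files of Text keys and IntWritable values by the keys'
+     // natural (WritableComparable) order.
+     SequenceFile.Sorter sorter =
+         new SequenceFile.Sorter(fs, Text.class, IntWritable.class, conf);
+     sorter.setFactor(10);      // merge at most 10 streams at once
+     sorter.setMemory(1 << 20); // 1 MB of buffer memory
+
+     Path[] inputs = { new Path("/tmp/in1.seq"), new Path("/tmp/in2.seq") };
+     sorter.sort(inputs, new Path("/tmp/sorted.seq"), false /* keep inputs */);
+   }
+ }
+ -->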
+ <!-- start interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
+ <interface name="SequenceFile.Sorter.RawKeyValueIterator" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw key
+ @return DataOutputBuffer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getValue" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw value
+ @return ValueBytes
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets up the current key and value (for getKey and getValue)
+ @return true if there exists a key/value, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the iterator so that the underlying streams can be closed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the Progress object; this has a float (0.0 - 1.0)
+ indicating the fraction of bytes processed by the iterator so far]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to iterate over raw keys/values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
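+ <!-- Usage sketch: consuming a RawKeyValueIterator produced by
+ Sorter.sortAndIterate. A minimal sketch with hypothetical paths; next()
+ positions the current record, after which getKey()/getValue() expose the
+ raw bytes without deserialization.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.SequenceFile;
+ import org.apache.hadoop.io.Text;
+
+ public class RawIterationExample {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     FileSystem fs = FileSystem.get(conf);
+     SequenceFile.Sorter sorter =
+         new SequenceFile.Sorter(fs, Text.class, IntWritable.class, conf);
+
+     Path[] inputs = { new Path("/tmp/in1.seq") };
+     SequenceFile.Sorter.RawKeyValueIterator it =
+         sorter.sortAndIterate(inputs, new Path("/tmp/sort-tmp"), false);
+     while (it.next()) {
+       int keyBytes = it.getKey().getLength(); // raw key in a DataOutputBuffer
+       int valBytes = it.getValue().getSize(); // raw value via ValueBytes
+       System.out.println("key: " + keyBytes + " B, value: " + valBytes + " B");
+     }
+     it.close(); // lets the underlying streams be closed
+   }
+ }
+ -->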
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <class name="SequenceFile.Sorter.SegmentDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="SequenceFile.Sorter.SegmentDescriptor" type="long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a segment
+ @param segmentOffset the offset of the segment in the file
+ @param segmentLength the length of the segment
+ @param segmentPathName the path name of the file containing the segment]]>
+ </doc>
+ </constructor>
+ <method name="doSync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do the sync checks]]>
+ </doc>
+ </method>
+ <method name="preserveInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="preserve" type="boolean"/>
+ <doc>
+ <![CDATA[Sets whether to preserve the input file (i.e., not delete it) when it is no longer needed]]>
+ </doc>
+ </method>
+ <method name="shouldPreserveInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRawKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the rawKey object with the key returned by the Reader
+ @return true if there is a key returned; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rawValue" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills up the passed rawValue with the value corresponding to the key
+ read earlier
+ @param rawValue
+ @return the length of the value
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the stored rawKey]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The default cleanup. Subclasses can override this with a custom
+ cleanup.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class defines a merge segment. This class can be subclassed to
+ provide a customized cleanup method implementation. In this
+ implementation, cleanup closes the file handle and deletes the file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <!-- start interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
+ <interface name="SequenceFile.ValueBytes" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the uncompressed bytes to the outStream.
+ @param outStream : Stream to write uncompressed bytes into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to outStream.
+ Note that it will NOT compress the bytes if they are not already compressed.
+ @param outStream : Stream to write compressed bytes into.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Size of stored data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to 'raw' values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
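+ <!-- Usage sketch: copying records between sequence files through the
+ 'raw' interfaces, so keys and values are never deserialized. A hedged
+ sketch with hypothetical paths, assuming the source and destination use
+ compatible compression settings so the raw value bytes can be appended
+ as-is.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.io.DataOutputBuffer;
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.SequenceFile;
+ import org.apache.hadoop.io.Text;
+
+ public class RawCopyExample {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     FileSystem fs = FileSystem.get(conf);
+     SequenceFile.Reader reader =
+         new SequenceFile.Reader(fs, new Path("/tmp/in.seq"), conf);
+     SequenceFile.Writer writer = SequenceFile.createWriter(
+         fs, conf, new Path("/tmp/out.seq"), Text.class, IntWritable.class);
+
+     DataOutputBuffer key = new DataOutputBuffer();
+     SequenceFile.ValueBytes val = reader.createValueBytes();
+     while (reader.nextRaw(key, val) != -1) { // -1 signals end of file
+       writer.appendRaw(key.getData(), 0, key.getLength(), val);
+       key.reset(); // nextRaw appends into the buffer, so clear it each time
+     }
+     writer.close();
+     reader.close();
+   }
+ }
+ -->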
+ <!-- start class org.apache.hadoop.io.SequenceFile.Writer -->
+ <class name="SequenceFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, int, short, long, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a sync point.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="appendRaw"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keyData" type="byte[]"/>
+ <param name="keyOffset" type="int"/>
+ <param name="keyLength" type="int"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current length of the output file.
+
+ <p>This always returns a synchronized position. In other words,
+ immediately after calling {@link SequenceFile.Reader#seek(long)} with a position
+ returned by this method, {@link SequenceFile.Reader#next(Writable)} may be called. However,
+ the key may be earlier in the file than the key last written when this
+ method was called (e.g., with block-compression, it may be the first key
+ in the block that was being written when this method was called).]]>
+ </doc>
+ </method>
+ <field name="keySerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="uncompressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="compressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Write key/value pairs to a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Writer -->
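+ <!-- Usage sketch: recording a synchronized position with getLength() and
+ resuming from it with Reader.seek, per the contract described above. A
+ minimal sketch with a hypothetical path.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.SequenceFile;
+ import org.apache.hadoop.io.Text;
+
+ public class WriterSyncPointExample {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     FileSystem fs = FileSystem.get(conf);
+     Path file = new Path("/tmp/example.seq");
+
+     SequenceFile.Writer writer = new SequenceFile.Writer(
+         fs, conf, file, Text.class, IntWritable.class);
+     writer.append(new Text("one"), new IntWritable(1));
+     writer.sync();                  // force a sync point
+     long mark = writer.getLength(); // a position Reader.seek will accept
+     writer.append(new Text("two"), new IntWritable(2));
+     writer.close();
+
+     SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
+     reader.seek(mark);              // resume at the recorded position
+     Text key = new Text();
+     IntWritable val = new IntWritable();
+     while (reader.next(key, val)) {
+       System.out.println(key + " = " + val);
+     }
+     reader.close();
+   }
+ }
+ -->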
+ <!-- start class org.apache.hadoop.io.SetFile -->
+ <class name="SetFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A file-based set of keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile -->
+ <!-- start class org.apache.hadoop.io.SetFile.Reader -->
+ <class name="SetFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set using the named comparator.]]>
+ </doc>
+ </constructor>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in a set into <code>key</code>. Returns
+ true if such a key exists and false when at the end of the set.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the matching key from a set into <code>key</code>.
+ Returns <code>key</code>, or null if no match exists.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Reader -->
+ <!-- start class org.apache.hadoop.io.SetFile.Writer -->
+ <class name="SetFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="pass a Configuration too">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named set for keys of the named class.
+ @deprecated pass a Configuration too]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element class and compression type.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element comparator and compression type.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key to a set. The key must be strictly greater than the
+ previous key added to the set.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Writer -->
+ <!-- start class org.apache.hadoop.io.SortedMapWritable -->
+ <class name="SortedMapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="SortedMapWritable" type="org.apache.hadoop.io.SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="comparator" return="java.util.Comparator&lt;? super org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="firstKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="headMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="lastKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="subMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="tailMap" return="java.util.SortedMap&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set&lt;org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map&lt;? extends org.apache.hadoop.io.WritableComparable, ? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable SortedMap.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SortedMapWritable -->
+ <!-- start interface org.apache.hadoop.io.Stringifier -->
+ <interface name="Stringifier" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Converts the object to a string representation
+ @param obj the object to convert
+ @return the string representation of the object
+ @throws IOException if the object cannot be converted]]>
+ </doc>
+ </method>
+ <method name="fromString" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from its string representation.
+ @param str the string representation of the object
+ @return restored object
+ @throws IOException if the object cannot be restored]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes this object.
+ @throws IOException if an I/O error occurs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stringifier interface offers two methods to convert an object
+ to a string representation and restore the object given its
+ string representation.
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Stringifier -->
+ <!-- start class org.apache.hadoop.io.Text -->
+ <class name="Text" extends="org.apache.hadoop.io.BinaryComparable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.io.BinaryComparable&gt;"/>
+ <constructor name="Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Text" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a string.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="org.apache.hadoop.io.Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another text.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a byte array.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the raw bytes; however, only data up to {@link #getLength()} is
+ valid.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of bytes in the byte array]]>
+ </doc>
+ </method>
+ <method name="charAt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="int"/>
+ <doc>
+ <![CDATA[Returns the Unicode Scalar Value (32-bit integer value)
+ for the character at <code>position</code>. Note that this
+ method avoids using the converter or doing String instantiation.
+ @return the Unicode scalar value at position or -1
+ if the position is invalid or points to a
+ trailing byte]]>
+ </doc>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Finds any occurence of <code>what</code> in the backing
+ buffer, starting as position <code>start</code>. The starting
+ position is measured in bytes and the return value is in
+ terms of byte position in the buffer. The backing buffer is
+ not converted to a string for this operation.
+ @return byte position of the first occurence of the search
+ string in the UTF-8 buffer or -1 if not found]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <doc>
+ <![CDATA[Set to a utf8 byte array]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[copy a text.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Set the Text to range of bytes
+ @param utf8 the data to copy from
+ @param start the first position of the new string
+ @param len the number of bytes of the new string]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Append a range of bytes to the end of the given text
+ @param utf8 the data to copy from
+ @param start the first position to append from utf8
+ @param len the number of bytes to append]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clear the string to empty.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert text back to string
+ @see java.lang.Object#toString()]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[deserialize]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one Text in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[serialize
+ write this object to out
+ length uses zero-compressed encoding
+ @see Writable#write(DataOutput)]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a Text with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If the input is malformed, malformed
+ bytes are replaced with the substitution character (U+FFFD).]]>
+ </doc>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If the input is malformed,
+ invalid chars are replaced with the substitution character (U+FFFD).
+ @return ByteBuffer: the bytes are stored at ByteBuffer.array()
+ and the length is given by ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.
+ @return ByteBuffer: the bytes are stored at ByteBuffer.array()
+ and the length is given by ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF8 encoded string from in]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF8 encoded string to out]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check if a byte array contains valid utf-8
+ @param utf8 byte array
+ @throws MalformedInputException if the byte array contains invalid utf-8]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check to see if a byte array is valid utf-8
+ @param utf8 the array of bytes
+ @param start the offset of the first byte in the array
+ @param len the length of the byte sequence
+ @throws MalformedInputException if the byte array contains invalid bytes]]>
+ </doc>
+ </method>
+ <method name="bytesToCodePoint" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="java.nio.ByteBuffer"/>
+ <doc>
+ <![CDATA[Returns the next code point at the current position in
+ the buffer. The buffer's position will be incremented.
+ Any mark set on this buffer will be changed by this method!]]>
+ </doc>
+ </method>
+ <method name="utf8Length" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[For the given string, returns the number of UTF-8 bytes
+ required to encode the string.
+ @param string text to encode
+ @return number of UTF-8 bytes required to encode]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class stores text using standard UTF8 encoding. It provides methods
+ to serialize, deserialize, and compare texts at byte level. The type of
+ length is integer and is serialized using zero-compressed format. <p>In
+ addition, it provides methods for string traversal without converting the
+ byte array to a string. <p>Also includes utilities for
+ serializing/deserialing a string, coding/decoding a string, checking if a
+ byte array contains valid UTF8 code, calculating the length of an encoded
+ string.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text -->
+ <!-- start class org.apache.hadoop.io.Text.Comparator -->
+ <class name="Text.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Text.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for Text keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text.Comparator -->
+ <!-- start class org.apache.hadoop.io.TwoDArrayWritable -->
+ <class name="TwoDArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[][]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[][]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for 2D arrays containing a matrix of instances of a class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.TwoDArrayWritable -->
+ <!-- start class org.apache.hadoop.io.UTF8 -->
+ <class name="UTF8" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="replaced by Text">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UTF8" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <constructor name="UTF8" type="org.apache.hadoop.io.UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw bytes.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the encoded string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one UTF8 in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare two UTF8s.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert to a String.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a UTF8 with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convert a string to a UTF-8 encoded byte array.
+ @see String#getBytes(String)]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string.
+
+ @see DataInput#readUTF()]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF-8 encoded string.
+
+ @see DataOutput#writeUTF(String)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for strings that uses the UTF8 encoding.
+
+ <p>Also includes utilities for efficiently reading and writing UTF-8.
+
+ @deprecated replaced by Text]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8 -->
+ <!-- start class org.apache.hadoop.io.UTF8.Comparator -->
+ <class name="UTF8.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UTF8.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8.Comparator -->
+ <!-- start class org.apache.hadoop.io.VersionedWritable -->
+ <class name="VersionedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="VersionedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="byte"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the version number of the current implementation.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for Writables that provides version checking.
+
+ <p>This is useful when a class may evolve, so that instances written by the
+ old version of the class may still be processed by the new version. To
+ handle this situation, {@link #readFields(DataInput)}
+ implementations should catch {@link VersionMismatchException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionedWritable -->
+ <!-- start class org.apache.hadoop.io.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="byte, byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Thrown by {@link VersionedWritable#readFields(DataInput)} when the
+ version of an object being read does not match the current implementation
+ version as returned by {@link VersionedWritable#getVersion()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionMismatchException -->
+ <!-- start class org.apache.hadoop.io.VIntWritable -->
+ <class name="VIntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VIntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VIntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VIntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VIntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for integer values stored in variable-length format.
+ Such values take between one and five bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVInt(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VIntWritable -->
+ <!-- start class org.apache.hadoop.io.VLongWritable -->
+ <class name="VLongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VLongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VLongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VLongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VLongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs in a variable-length format. Such values take
+ between one and nine bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVLong(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VLongWritable -->
+ <!-- start interface org.apache.hadoop.io.Writable -->
+ <interface name="Writable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the fields of this object to <code>out</code>.
+
+ @param out <code>DataOuput</code> to serialize this object into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the fields of this object from <code>in</code>.
+
+ <p>For efficiency, implementations should attempt to re-use storage in the
+ existing object where possible.</p>
+
+ @param in <code>DataInput</code> to deseriablize this object from.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A serializable object which implements a simple, efficient, serialization
+ protocol, based on {@link DataInput} and {@link DataOutput}.
+
+ <p>Any <code>key</code> or <code>value</code> type in the Hadoop Map-Reduce
+ framework implements this interface.</p>
+
+ <p>Implementations typically implement a static <code>read(DataInput)</code>
+ method which constructs a new instance, calls {@link #readFields(DataInput)}
+ and returns the instance.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritable implements Writable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public static MyWritable read(DataInput in) throws IOException {
+ MyWritable w = new MyWritable();
+ w.readFields(in);
+ return w;
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Writable -->
+ <!-- start interface org.apache.hadoop.io.WritableComparable -->
+ <interface name="WritableComparable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable&lt;T&gt;"/>
+ <doc>
+ <![CDATA[A {@link Writable} which is also {@link Comparable}.
+
+ <p><code>WritableComparable</code>s can be compared to each other, typically
+ via <code>Comparator</code>s. Any type which is to be used as a
+ <code>key</code> in the Hadoop Map-Reduce framework should implement this
+ interface.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritableComparable implements WritableComparable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public int compareTo(MyWritableComparable w) {
+ int thisValue = this.counter;
+ int thatValue = w.counter;
+ return (thisValue &lt; thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableComparable -->
+ <!-- start class org.apache.hadoop.io.WritableComparator -->
+ <class name="WritableComparator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator"/>
+ <constructor name="WritableComparator" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </constructor>
+ <constructor name="WritableComparator" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"/>
+ <doc>
+ <![CDATA[Get a comparator for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link WritableComparable}
+ implementation.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the WritableComparable implementation class.]]>
+ </doc>
+ </method>
+ <method name="newKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a new {@link WritableComparable} instance.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Optimization hook. Override this to make SequenceFile.Sorter's scream.
+
+ <p>The default implementation reads the data into two {@link
+ WritableComparable}s (using {@link
+ Writable#readFields(DataInput)}), then calls {@link
+ #compare(WritableComparable,WritableComparable)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[Compare two WritableComparables.
+
+ <p> The default implementation uses the natural ordering, calling {@link
+ Comparable#compareTo(Object)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <method name="hashBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Compute hash for binary data.]]>
+ </doc>
+ </method>
+ <method name="readUnsignedShort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an unsigned short from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an integer from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a long from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array with the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator for {@link WritableComparable}s.
+
+ <p>This base implemenation uses the natural ordering. To define alternate
+ orderings, override {@link #compare(WritableComparable,WritableComparable)}.
+
+ <p>One may optimize compare-intensive operations by overriding
+ {@link #compare(byte[],int,int,byte[],int,int)}. Static utility methods are
+ provided to assist in optimized implementations of this method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableComparator -->
+ <!-- start class org.apache.hadoop.io.WritableFactories -->
+ <class name="WritableFactories" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="factory" type="org.apache.hadoop.io.WritableFactory"/>
+ <doc>
+ <![CDATA[Define a factory for a class.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.io.WritableFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Define a factory for a class.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factories for non-public writables. Defining a factory permits {@link
+ ObjectWritable} to be able to construct instances of non-public classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableFactories -->
+ <!-- start interface org.apache.hadoop.io.WritableFactory -->
+ <interface name="WritableFactory" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a new instance.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A factory for a class of Writable.
+ @see WritableFactories]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableFactory -->
+ <!-- start class org.apache.hadoop.io.WritableName -->
+ <class name="WritableName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the name by which a class is known, overriding the default
+ class name.]]>
+ </doc>
+ </method>
+ <method name="addName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add an alternate name for a class.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Return the name for a class. Default is {@link Class#getName()}.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the class for a name. Default is {@link Class#forName(String)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility to permit renaming of Writable implementation classes without
+ invalidating files that contain their class name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableName -->
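+ <!-- Illustrative sketch (editorial, not part of the generated API record): keeping
+ old files readable after renaming a Writable class. All names are hypothetical.
+
+   // org.example.NewRecord replaces org.example.OldRecord; files written
+   // before the rename still carry the old class name.
+   WritableName.addName(NewRecord.class, "org.example.OldRecord");
+   // WritableName.getClass("org.example.OldRecord", conf) now resolves
+   // to NewRecord instead of failing on the missing old class.
+ -->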
+ <!-- start class org.apache.hadoop.io.WritableUtils -->
+ <class name="WritableUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="WritableUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readCompressedByteArray" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skipCompressedByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedByteArray" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="bytes" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="displayByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="record" type="byte[]"/>
+ </method>
+ <method name="clone" return="T extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="orig" type="T extends org.apache.hadoop.io.Writable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Make a copy of a writable object using serialization to a buffer.
+ @param orig The object to copy
+ @return The copied object]]>
+ </doc>
+ </method>
+ <method name="cloneInto"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.io.Writable"/>
+ <param name="src" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a copy of the writable object using serialization to a buffer.
+ @param dst the object to copy into, whose previous contents are overwritten
+ @param src the object to copy from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an integer to a binary stream with zero-compressed encoding.
+ For -120 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ integer is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -121 and -124, the following integer
+ is positive, and the number of bytes that follow is -(v+120).
+ If the first byte value v is between -125 and -128, the following integer
+ is negative, and the number of bytes that follow is -(v+124). Bytes are
+ stored in the high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Integer to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ long is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following long
+ is positive, and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following long
+ is negative, and the number of bytes that follow is -(v+120). Bytes are
+ stored in the high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
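+ <!-- Worked example (editorial sketch, derived from the description above, not
+ part of the generated API record): writeVLong(stream, 300). 300 does not fit
+ in one byte, so its two value bytes 0x01 0x2C are written high byte first,
+ preceded by a marker byte v with -(v+112) = 2, i.e. v = -114 (0x8E as an
+ unsigned byte). The encoded form is therefore 0x8E 0x01 0x2C. A value in
+ [-112, 127], such as 100, is written as the single byte 0x64. -->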
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized long from stream.]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized integer from stream.]]>
+ </doc>
+ </method>
+ <method name="isNegativeVInt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Given the first byte of a vint/vlong, determine the sign
+ @param value the first byte
+ @return is the value negative]]>
+ </doc>
+ </method>
+ <method name="decodeVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Parse the first byte of a vint/vlong to determine the number of bytes
+ @param value the first byte of the vint/vlong
+ @return the total number of bytes (1 to 9)]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+ <![CDATA[Get the encoded length of an integer stored in variable-length format
+ @return the encoded length]]>
+ </doc>
+ </method>
+ <method name="readEnum" return="T extends java.lang.Enum&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="enumType" type="java.lang.Class&lt;T&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an Enum value from DataInput. Enums are read and written
+ using String values.
+ @param <T> Enum type
+ @param in DataInput to read from
+ @param enumType Class type of Enum
+ @return Enum represented by String read from DataInput
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeEnum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="enumVal" type="java.lang.Enum&lt;?&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the String value of an enum to DataOutput.
+ @param out DataOutput stream
+ @param enumVal enum value
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip <i>len</i> bytes in the input stream <i>in</i>.
+ @param in input stream
+ @param len number of bytes to skip
+ @throws IOException when fewer than <i>len</i> bytes could be skipped]]>
+ </doc>
+ </method>
+ <method name="toByteArray" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writables" type="org.apache.hadoop.io.Writable[]"/>
+ <doc>
+ <![CDATA[Convert writables to a byte array]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableUtils -->
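+ <!-- Illustrative sketch (editorial, not part of the generated API record): a
+ round trip through the variable-length encoders using plain java.io streams.
+
+   import java.io.*;
+   import org.apache.hadoop.io.WritableUtils;
+
+   ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+   DataOutputStream out = new DataOutputStream(bytes);
+   WritableUtils.writeVInt(out, 300);        // marker byte + 2 value bytes
+   WritableUtils.writeString(out, "spark");  // vint length, then UTF-8 bytes
+
+   DataInputStream in = new DataInputStream(
+       new ByteArrayInputStream(bytes.toByteArray()));
+   int n = WritableUtils.readVInt(in);       // 300
+   String s = WritableUtils.readString(in);  // "spark"
+ -->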
+</package>
+<package name="org.apache.hadoop.io.compress">
+ <!-- start class org.apache.hadoop.io.compress.BZip2Codec -->
+ <class name="BZip2Codec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="BZip2Codec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BZip2Codec]]>
+ </doc>
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates CompressionOutputStream for BZip2
+
+ @param out
+ The output Stream
+ @return The BZip2 CompressionOutputStream
+ @throws java.io.IOException
+ Throws IO exception]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a CompressionInputStream from which uncompressed data can be read.
+
+ @param in
+ The InputStream
+ @return Returns CompressionInputStream for BZip2
+ @throws java.io.IOException
+ Throws IOException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[.bz2 is recognized as the default extension for compressed BZip2 files
+
+ @return A String telling the default bzip2 file extension]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class provides CompressionOutputStream and CompressionInputStream for
+ compression and decompression. Currently we don't have an implementation of
+ the Compressor and Decompressor interfaces, so those methods of
+ CompressionCodec which take a Compressor or Decompressor argument throw
+ UnsupportedOperationException.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.BZip2Codec -->
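+ <!-- Illustrative sketch (editorial, not part of the generated API record):
+ writing a bzip2-compressed file through the codec. The file name is hypothetical.
+
+   import java.io.*;
+   import org.apache.hadoop.io.compress.*;
+
+   BZip2Codec codec = new BZip2Codec();
+   CompressionOutputStream out =
+       codec.createOutputStream(new FileOutputStream("data.bz2"));
+   out.write("hello".getBytes("UTF-8"));
+   out.finish();  // complete the compressed payload
+   out.close();
+ -->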
+ <!-- start class org.apache.hadoop.io.compress.CodecPool -->
+ <class name="CodecPool" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CodecPool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Compressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Compressor</code>
+ @return <code>Compressor</code> for the given
+ <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="getDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Decompressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Decompressor</code>
+ @return <code>Decompressor</code> for the given
+ <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="returnCompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <doc>
+ <![CDATA[Return the {@link Compressor} to the pool.
+
+ @param compressor the <code>Compressor</code> to be returned to the pool]]>
+ </doc>
+ </method>
+ <method name="returnDecompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <doc>
+ <![CDATA[Return the {@link Decompressor} to the pool.
+
+ @param decompressor the <code>Decompressor</code> to be returned to the
+ pool]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A global compressor/decompressor pool used to save and reuse
+ (possibly native) compression/decompression codecs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CodecPool -->
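+ <!-- Illustrative sketch (editorial, not part of the generated API record): the
+ borrow/return discipline the pool expects; `codec` and `rawOut` are assumed
+ to exist in scope.
+
+   Compressor compressor = CodecPool.getCompressor(codec);
+   try {
+     CompressionOutputStream out = codec.createOutputStream(rawOut, compressor);
+     // ... write data, then out.finish() and out.close() ...
+   } finally {
+     CodecPool.returnCompressor(compressor);  // hand it back for reuse
+   }
+ -->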
+ <!-- start interface org.apache.hadoop.io.compress.CompressionCodec -->
+ <interface name="CompressionCodec" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream}.
+
+ @param out the location for the final output stream
+ @return a stream the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream} with the given {@link Compressor}.
+
+ @param out the location for the final output stream
+ @param compressor compressor to use
+ @return a stream the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
+
+ @return the type of compressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Compressor} for use by this {@link CompressionCodec}.
+
+ @return a new compressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a stream decompressor that will read from the given input stream.
+
+ @param in the stream to read compressed bytes from
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionInputStream} that will read from the given
+ {@link InputStream} with the given {@link Decompressor}.
+
+ @param in the stream to read compressed bytes from
+ @param decompressor decompressor to use
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
+
+ @return the type of decompressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
+
+ @return a new decompressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a streaming compression/decompression pair.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.CompressionCodec -->
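+ <!-- Illustrative sketch (editorial, not part of the generated API record):
+ reading compressed data back through any CompressionCodec; `codec`, `rawIn`
+ and `sink` are assumed to exist in scope.
+
+   CompressionInputStream in = codec.createInputStream(rawIn);
+   byte[] buf = new byte[4096];
+   int n;
+   while ((n = in.read(buf, 0, buf.length)) != -1) {
+     sink.write(buf, 0, n);  // consume the uncompressed bytes
+   }
+   in.close();
+ -->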
+ <!-- start class org.apache.hadoop.io.compress.CompressionCodecFactory -->
+ <class name="CompressionCodecFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionCodecFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Find the codecs specified in the config value io.compression.codecs
+ and register them. Defaults to gzip and zip.]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print the extension map out as a string.]]>
+ </doc>
+ </method>
+ <method name="getCodecClasses" return="java.util.List&lt;java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the list of codecs listed in the configuration
+ @param conf the configuration to look in
+ @return a list of the codec classes or null if the attribute
+ was not set]]>
+ </doc>
+ </method>
+ <method name="setCodecClasses"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="classes" type="java.util.List&lt;java.lang.Class&gt;"/>
+ <doc>
+ <![CDATA[Sets a list of codec classes in the configuration.
+ @param conf the configuration to modify
+ @param classes the list of classes to set]]>
+ </doc>
+ </method>
+ <method name="getCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Find the relevant compression codec for the given file based on its
+ filename suffix.
+ @param file the filename to check
+ @return the codec object]]>
+ </doc>
+ </method>
+ <method name="removeSuffix" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes a suffix from a filename, if present.
+ @param filename the filename to strip
+ @param suffix the suffix to remove
+ @return the shortened filename]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[A little test program.
+ @param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A factory that will find the correct codec for a given filename.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionCodecFactory -->
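+ <!-- Illustrative sketch (editorial, not part of the generated API record):
+ resolving a codec from a file suffix. The path is hypothetical.
+
+   import org.apache.hadoop.conf.Configuration;
+   import org.apache.hadoop.fs.Path;
+   import org.apache.hadoop.io.compress.*;
+
+   CompressionCodecFactory factory =
+       new CompressionCodecFactory(new Configuration());
+   CompressionCodec codec = factory.getCodec(new Path("logs/part-0.gz"));
+   if (codec != null) {
+     String stripped = CompressionCodecFactory.removeSuffix(
+         "part-0.gz", codec.getDefaultExtension());  // "part-0"
+   }
+ -->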
+ <!-- start class org.apache.hadoop.io.compress.CompressionInputStream -->
+ <class name="CompressionInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a compression input stream that reads
+ the decompressed bytes from the given stream.
+
+ @param in The input stream holding the compressed bytes.]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read bytes from the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the decompressor to its initial state and discard any buffered data,
+ as the underlying stream may have been repositioned.]]>
+ </doc>
+ </method>
+ <field name="in" type="java.io.InputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The input stream holding the compressed bytes.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression input stream.
+
+ <p>Implementations are assumed to be buffered. This permits clients to
+ reposition the underlying input stream then call {@link #resetState()},
+ without having to also synchronize client buffers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <class name="CompressionOutputStream" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a compression output stream that writes
+ the compressed bytes to the given stream.
+ @param out the stream to write the compressed bytes to]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finishes writing compressed data to the output stream
+ without closing the underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the compression to the initial state.
+ Does not reset the underlying stream.]]>
+ </doc>
+ </method>
+ <field name="out" type="java.io.OutputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The underlying output stream that receives the compressed bytes.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression output stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionOutputStream -->
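+ <!-- Illustrative sketch (editorial, not part of the generated API record): the
+ intended difference between finish() and close(). Whether a codec supports
+ further writes on one raw stream after finish() is implementation-dependent;
+ `codec`, `rawOut` and `block` are assumed to exist in scope.
+
+   CompressionOutputStream cout = codec.createOutputStream(rawOut);
+   cout.write(block, 0, block.length);
+   cout.finish();      // terminate the compressed payload, rawOut stays open
+   cout.resetState();  // return the compressor to its initial state
+   cout.close();       // closes the stream when done
+ -->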
+ <!-- start interface org.apache.hadoop.io.compress.Compressor -->
+ <interface name="Compressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for compression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets preset dictionary for compression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of uncompressed bytes input so far.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of compressed bytes output so far.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[When called, indicates that compression should end
+ with the current contents of the input buffer.]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the compressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with compressed data. Returns actual number
+ of bytes of compressed data. A return value of 0 indicates that
+ needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the compressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of compressed data.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets compressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the compressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'compressor' which can be
+ plugged into a {@link CompressionOutputStream} to compress data.
+ This is modelled after {@link java.util.zip.Deflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Compressor -->
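+ <!-- Illustrative sketch (editorial, not part of the generated API record): the
+ push/pull protocol described above, in the style of java.util.zip.Deflater;
+ `codec`, `data` and `sink` are assumed to exist in scope.
+
+   Compressor c = codec.createCompressor();
+   c.setInput(data, 0, data.length);
+   c.finish();  // no more input will follow
+   byte[] buf = new byte[4096];
+   while (!c.finished()) {
+     int n = c.compress(buf, 0, buf.length);
+     sink.write(buf, 0, n);  // n may be 0 if more input were needed
+   }
+   c.end();  // release (possibly native) resources
+ -->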
+ <!-- start interface org.apache.hadoop.io.compress.Decompressor -->
+ <interface name="Decompressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for decompression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets preset dictionary for decompression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns <code>true</code> if a preset dictionary is needed for decompression.
+ @return <code>true</code> if a preset dictionary is needed for decompression]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the compressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with uncompressed data. Returns actual number
+ of bytes of uncompressed data. A return value of 0 indicates that
+ #needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the uncompressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of uncompressed data.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets decompressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the decompressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'de-compressor' which can be
+ plugged into a {@link CompressionInputStream} to decompress data.
+ This is modelled after {@link java.util.zip.Inflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Decompressor -->
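+ <!-- Illustrative sketch (editorial, not part of the generated API record): the
+ matching pull loop for a Decompressor, in the style of java.util.zip.Inflater;
+ `codec`, `compressed` and `sink` are assumed to exist in scope.
+
+   Decompressor d = codec.createDecompressor();
+   d.setInput(compressed, 0, compressed.length);
+   byte[] buf = new byte[4096];
+   while (!d.finished() && !d.needsInput()) {
+     int n = d.decompress(buf, 0, buf.length);
+     sink.write(buf, 0, n);
+   }
+   d.end();
+ -->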
+ <!-- start class org.apache.hadoop.io.compress.DefaultCodec -->
+ <class name="DefaultCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="DefaultCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.DefaultCodec -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec -->
+ <class name="GzipCodec" extends="org.apache.hadoop.io.compress.DefaultCodec"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class creates gzip compressors/decompressors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec -->
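+ <!-- Illustrative sketch (editorial, not part of the generated API record):
+ GzipCodec inherits Configurable from DefaultCodec, so it should be given a
+ Configuration before creating streams.
+
+   import org.apache.hadoop.conf.Configuration;
+   import org.apache.hadoop.io.compress.GzipCodec;
+
+   GzipCodec codec = new GzipCodec();
+   codec.setConf(new Configuration());
+   // codec.getDefaultExtension() is ".gz"; the output of
+   // codec.createOutputStream(...) is in standard gzip format.
+ -->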
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <class name="GzipCodec.GzipInputStream" extends="org.apache.hadoop.io.compress.DecompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipInputStream" type="org.apache.hadoop.io.compress.DecompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow subclasses to directly set the inflater stream.]]>
+ </doc>
+ </constructor>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <class name="GzipCodec.GzipOutputStream" extends="org.apache.hadoop.io.compress.CompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipOutputStream" type="org.apache.hadoop.io.compress.CompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow subclasses to supply a different stream type here.
+ @param out the Deflater stream to use]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A bridge that wraps around a DeflaterOutputStream to make it
+ a CompressionOutputStream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <!-- start class org.apache.hadoop.io.compress.LzoCodec -->
+ <class name="LzoCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="LzoCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if the native-lzo library is loaded & initialized.
+
+ @param conf configuration
+ @return <code>true</code> if the native-lzo library is loaded & initialized;
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link org.apache.hadoop.io.compress.CompressionCodec} for a streaming
+ <b>lzo</b> compression/decompression pair.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzoCodec -->
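+ <!-- Usage sketch for the LzoCodec API above: a minimal round trip through
+ createOutputStream/createInputStream. It assumes the native-lzo library is
+ installed (checked via isNativeLzoLoaded); the file name is hypothetical and
+ error handling is omitted.
+
+ import java.io.FileInputStream;
+ import java.io.FileOutputStream;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.io.compress.CompressionInputStream;
+ import org.apache.hadoop.io.compress.CompressionOutputStream;
+ import org.apache.hadoop.io.compress.LzoCodec;
+
+ public class LzoCodecExample {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     if (!LzoCodec.isNativeLzoLoaded(conf)) {
+       System.err.println("native-lzo is not loaded; nothing to do");
+       return;
+     }
+     LzoCodec codec = new LzoCodec();
+     codec.setConf(conf);                      // the codec is Configurable
+     String file = "data" + codec.getDefaultExtension();
+     // Compress: wrap a raw OutputStream in a CompressionOutputStream.
+     CompressionOutputStream out =
+         codec.createOutputStream(new FileOutputStream(file));
+     out.write("hello lzo".getBytes("UTF-8"));
+     out.close();
+     // Decompress: read the same file back through a CompressionInputStream.
+     CompressionInputStream in =
+         codec.createInputStream(new FileInputStream(file));
+     int b;
+     while ((b = in.read()) != -1) {
+       System.out.write(b);
+     }
+     in.close();
+     System.out.flush();
+   }
+ }
+ -->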
+ <!-- start class org.apache.hadoop.io.compress.LzopCodec -->
+ <class name="LzopCodec" extends="org.apache.hadoop.io.compress.LzoCodec"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LzopCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link org.apache.hadoop.io.compress.CompressionCodec} for a streaming
+ <b>lzo</b> compression/decompression pair compatible with lzop.
+ http://www.lzop.org/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzopCodec -->
+ <!-- start class org.apache.hadoop.io.compress.LzopCodec.LzopDecompressor -->
+ <class name="LzopCodec.LzopDecompressor" extends="org.apache.hadoop.io.compress.lzo.LzoDecompressor"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="LzopCodec.LzopDecompressor" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an LzoDecompressor with LZO1X strategy (the only lzo algorithm
+ supported by lzop).]]>
+ </doc>
+ </constructor>
+ <method name="initHeaderFlags"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dflags" type="java.util.EnumSet&lt;org.apache.hadoop.io.compress.LzopCodec.DChecksum&gt;"/>
+ <param name="cflags" type="java.util.EnumSet&lt;org.apache.hadoop.io.compress.LzopCodec.CChecksum&gt;"/>
+ <doc>
+ <![CDATA[Given a set of decompressed and compressed checksums, record the
+ checksums this decompressor instance should compute and verify.]]>
+ </doc>
+ </method>
+ <method name="resetChecksum"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all checksums registered for this decompressor instance.]]>
+ </doc>
+ </method>
+ <method name="verifyDChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="typ" type="org.apache.hadoop.io.compress.LzopCodec.DChecksum"/>
+ <param name="checksum" type="int"/>
+ <doc>
+ <![CDATA[Given a checksum type, verify its value against that observed in
+ decompressed data.]]>
+ </doc>
+ </method>
+ <method name="verifyCChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="typ" type="org.apache.hadoop.io.compress.LzopCodec.CChecksum"/>
+ <param name="checksum" type="int"/>
+ <doc>
+ <![CDATA[Given a checksum type, verify its value against that observed in
+ compressed data.]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzopCodec.LzopDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.LzopCodec.LzopInputStream -->
+ <class name="LzopCodec.LzopInputStream" extends="org.apache.hadoop.io.compress.BlockDecompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="LzopCodec.LzopInputStream" type="java.io.InputStream, org.apache.hadoop.io.compress.Decompressor, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="readHeader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read and verify an lzo header, setting the relevant block checksum
+ options and ignoring almost everything else.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzopCodec.LzopInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.LzopCodec.LzopOutputStream -->
+ <class name="LzopCodec.LzopOutputStream" extends="org.apache.hadoop.io.compress.BlockCompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="LzopCodec.LzopOutputStream" type="java.io.OutputStream, org.apache.hadoop.io.compress.Compressor, int, org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="writeLzopHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="strategy" type="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write an lzop-compatible header to the OutputStream provided.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the underlying stream and write a null word to the output stream.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.LzopCodec.LzopOutputStream -->
+</package>
+<package name="org.apache.hadoop.io.compress.bzip2">
+ <!-- start interface org.apache.hadoop.io.compress.bzip2.BZip2Constants -->
+ <interface name="BZip2Constants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="baseBlockSize" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_ALPHA_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_CODE_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNA" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNB" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="N_GROUPS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="G_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="N_ITERS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_SELECTORS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NUM_OVERSHOOT_BYTES" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rNums" type="int[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This array really shouldn't be here. Again, for historical purposes it
+ is.
+
+ <p>
+ FIXME: This array should be in a private or package private location,
+ since it could be modified by malicious code.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Base class for both the compress and decompress classes. Holds
+ common arrays and static data.
+ <p>
+ This interface is public for historical purposes. You should have no need to
+ use it.
+ </p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.bzip2.BZip2Constants -->
+ <!-- start class org.apache.hadoop.io.compress.bzip2.BZip2DummyCompressor -->
+ <class name="BZip2DummyCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="BZip2DummyCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[This is a dummy compressor for BZip2.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.bzip2.BZip2DummyCompressor -->
+ <!-- start class org.apache.hadoop.io.compress.bzip2.BZip2DummyDecompressor -->
+ <class name="BZip2DummyDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="BZip2DummyDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[This is a dummy decompressor for BZip2.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.bzip2.BZip2DummyDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.bzip2.CBZip2InputStream -->
+ <class name="CBZip2InputStream" extends="java.io.InputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.bzip2.BZip2Constants"/>
+ <constructor name="CBZip2InputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a new CBZip2InputStream which decompresses bytes read from the
+ specified stream.
+
+ <p>
+ Although BZip2 headers are marked with the magic <tt>"BZ"</tt>, this
+ constructor expects the next byte in the stream to be the first one after
+ the magic. Thus callers have to skip the first two bytes; otherwise this
+ constructor will throw an exception.
+ </p>
+
+ @throws IOException
+ if the stream content is malformed or an I/O error occurs.
+ @throws NullPointerException
+ if <tt>in == null</tt>]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dest" type="byte[]"/>
+ <param name="offs" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An input stream that decompresses from the BZip2 format (without the file
+ header chars) to be read as any other stream.
+
+ <p>
+ The decompression requires large amounts of memory. Thus you should call the
+ {@link #close() close()} method as soon as possible, to force
+ <tt>CBZip2InputStream</tt> to release the allocated memory. See
+ {@link CBZip2OutputStream CBZip2OutputStream} for information about memory
+ usage.
+ </p>
+
+ <p>
+ <tt>CBZip2InputStream</tt> reads bytes from the compressed source stream via
+ the single byte {@link java.io.InputStream#read() read()} method exclusively.
+ Thus you should consider using a buffered source stream.
+ </p>
+
+ <p>
+ Instances of this class are not threadsafe.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.bzip2.CBZip2InputStream -->
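+ <!-- Usage sketch for CBZip2InputStream, following the constructor contract
+ documented above: the caller skips the two magic bytes "BZ" before handing
+ the stream to the constructor, and wraps the source in a BufferedInputStream
+ because CBZip2InputStream reads single bytes. The file name is hypothetical.
+
+ import java.io.BufferedInputStream;
+ import java.io.FileInputStream;
+ import java.io.InputStream;
+ import org.apache.hadoop.io.compress.bzip2.CBZip2InputStream;
+
+ public class CBZip2ReadExample {
+   public static void main(String[] args) throws Exception {
+     InputStream raw = new BufferedInputStream(new FileInputStream("data.bz2"));
+     raw.read();  // 'B'
+     raw.read();  // 'Z'  (the constructor expects the stream past the magic)
+     CBZip2InputStream in = new CBZip2InputStream(raw);
+     int b;
+     while ((b = in.read()) != -1) {
+       System.out.write(b);
+     }
+     in.close();  // release the large decompression buffers promptly
+     System.out.flush();
+   }
+ }
+ -->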
+ <!-- start class org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream -->
+ <class name="CBZip2OutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.bzip2.BZip2Constants"/>
+ <constructor name="CBZip2OutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a new <tt>CBZip2OutputStream</tt> with a blocksize of 900k.
+
+ <p>
+ <b>Attention: </b>The caller is responsible for writing the two BZip2 magic
+ bytes <tt>"BZ"</tt> to the specified stream prior to calling this
+ constructor.
+ </p>
+
+ @param out
+ the destination stream.
+
+ @throws IOException
+ if an I/O error occurs in the specified stream.
+ @throws NullPointerException
+ if <code>out == null</code>.]]>
+ </doc>
+ </constructor>
+ <constructor name="CBZip2OutputStream" type="java.io.OutputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a new <tt>CBZip2OutputStream</tt> with specified blocksize.
+
+ <p>
+ <b>Attention: </b>The caller is responsible for writing the two BZip2 magic
+ bytes <tt>"BZ"</tt> to the specified stream prior to calling this
+ constructor.
+ </p>
+
+
+ @param out
+ the destination stream.
+ @param blockSize
+ the block size in 100k units.
+
+ @throws IOException
+ if an I/O error occurs in the specified stream.
+ @throws IllegalArgumentException
+ if <code>(blockSize < 1) || (blockSize > 9)</code>.
+ @throws NullPointerException
+ if <code>out == null</code>.
+
+ @see #MIN_BLOCKSIZE
+ @see #MAX_BLOCKSIZE]]>
+ </doc>
+ </constructor>
+ <method name="hbMakeCodeLengths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="len" type="char[]"/>
+ <param name="freq" type="int[]"/>
+ <param name="alphaSize" type="int"/>
+ <param name="maxLen" type="int"/>
+ <doc>
+ <![CDATA[This method is accessible by subclasses for historical purposes. If you
+ don't know what it does then you don't need it.]]>
+ </doc>
+ </method>
+ <method name="chooseBlockSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputLength" type="long"/>
+ <doc>
+ <![CDATA[Chooses a blocksize based on the given length of the data to compress.
+
+ @param inputLength
+ The length of the data which will be compressed by
+ <tt>CBZip2OutputStream</tt>.
+
+ @return The blocksize, between {@link #MIN_BLOCKSIZE} and
+ {@link #MAX_BLOCKSIZE} both inclusive. For a negative
+ <tt>inputLength</tt> this method always returns
+ <tt>MAX_BLOCKSIZE</tt>.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Overridden to close the stream.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBlockSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the blocksize parameter specified at construction time.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offs" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="MIN_BLOCKSIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The minimum supported blocksize <tt> == 1</tt>.]]>
+ </doc>
+ </field>
+ <field name="MAX_BLOCKSIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The maximum supported blocksize <tt> == 9</tt>.]]>
+ </doc>
+ </field>
+ <field name="SETMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="CLEARMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="GREATER_ICOST" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="LESSER_ICOST" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="SMALL_THRESH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="DEPTH_THRESH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="WORK_FACTOR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="QSORT_STACK_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.
+ <p>
+ If you are ever unlucky enough to hit a stack overflow whilst
+ sorting, increase the following constant and try again. In practice the
+ stack has never been observed to go above 27 elements, so the limit seems
+ very generous.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An output stream that compresses data in the BZip2 format (without
+ the file header chars) into another stream.
+
+ <p>
+ The compression requires large amounts of memory. Thus you should call the
+ {@link #close() close()} method as soon as possible, to force
+ <tt>CBZip2OutputStream</tt> to release the allocated memory.
+ </p>
+
+ <p>
+ You can shrink the amount of allocated memory and maybe raise the compression
+ speed by choosing a lower blocksize, which in turn may cause a lower
+ compression ratio. You can avoid unnecessary memory allocation by not
+ using a blocksize larger than the size of the input.
+ </p>
+
+ <p>
+ You can compute the memory usage for compressing by the following formula:
+ </p>
+
+ <pre>
+ &lt;code&gt;400k + (9 * blocksize)&lt;/code&gt;.
+ </pre>
+
+ <p>
+ To get the memory required for decompression by {@link CBZip2InputStream
+ CBZip2InputStream} use
+ </p>
+
+ <pre>
+ &lt;code&gt;65k + (5 * blocksize)&lt;/code&gt;.
+ </pre>
+
+ <table width="100%" border="1">
+ <colgroup> <col width="33%" /> <col width="33%" /> <col width="33%" />
+ </colgroup>
+ <tr>
+ <th colspan="3">Memory usage by blocksize</th>
+ </tr>
+ <tr>
+ <th align="right">Blocksize</th> <th align="right">Compression<br>
+ memory usage</th> <th align="right">Decompression<br>
+ memory usage</th>
+ </tr>
+ <tr>
+ <td align="right">100k</td>
+ <td align="right">1300k</td>
+ <td align="right">565k</td>
+ </tr>
+ <tr>
+ <td align="right">200k</td>
+ <td align="right">2200k</td>
+ <td align="right">1065k</td>
+ </tr>
+ <tr>
+ <td align="right">300k</td>
+ <td align="right">3100k</td>
+ <td align="right">1565k</td>
+ </tr>
+ <tr>
+ <td align="right">400k</td>
+ <td align="right">4000k</td>
+ <td align="right">2065k</td>
+ </tr>
+ <tr>
+ <td align="right">500k</td>
+ <td align="right">4900k</td>
+ <td align="right">2565k</td>
+ </tr>
+ <tr>
+ <td align="right">600k</td>
+ <td align="right">5800k</td>
+ <td align="right">3065k</td>
+ </tr>
+ <tr>
+ <td align="right">700k</td>
+ <td align="right">6700k</td>
+ <td align="right">3565k</td>
+ </tr>
+ <tr>
+ <td align="right">800k</td>
+ <td align="right">7600k</td>
+ <td align="right">4065k</td>
+ </tr>
+ <tr>
+ <td align="right">900k</td>
+ <td align="right">8500k</td>
+ <td align="right">4565k</td>
+ </tr>
+ </table>
+
+ <p>
+ For decompression <tt>CBZip2InputStream</tt> allocates less memory if the
+ bzipped input is smaller than one block.
+ </p>
+
+ <p>
+ Instances of this class are not threadsafe.
+ </p>
+
+ <p>
+ TODO: Update to BZip2 1.0.1
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream -->
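+ <!-- Usage sketch for CBZip2OutputStream, mirroring the contract documented
+ above: the caller writes the magic bytes "BZ" first, and chooseBlockSize
+ picks a blocksize (in 100k units) suited to the input length. The file name
+ is hypothetical.
+
+ import java.io.FileOutputStream;
+ import java.io.OutputStream;
+ import org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream;
+
+ public class CBZip2WriteExample {
+   public static void main(String[] args) throws Exception {
+     byte[] data = "some text to compress".getBytes("UTF-8");
+     OutputStream raw = new FileOutputStream("data.bz2");
+     raw.write('B');  // the caller, not the stream, writes the magic
+     raw.write('Z');
+     int blockSize = CBZip2OutputStream.chooseBlockSize(data.length);
+     CBZip2OutputStream out = new CBZip2OutputStream(raw, blockSize);
+     out.write(data);
+     out.close();  // finishes the block and releases the compression memory
+   }
+ }
+ -->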
+</package>
+<package name="org.apache.hadoop.io.compress.lzo">
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
+ <class name="LzoCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="LzoCompressor" type="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified {@link CompressionStrategy}.
+
+ @param strategy lzo compression algorithm to use
+ @param directBufferSize size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default lzo1x_1 compression.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo compressors are loaded and initialized.
+
+ @return <code>true</code> if lzo compressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes given to this compressor since the last reset.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of compressed bytes handed back to callers of compress since the last reset.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Noop.]]>
+ </doc>
+ </method>
+ <field name="LZO_LIBRARY_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor -->
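+ <!-- Usage sketch for the raw LzoCompressor API above: the standard
+ setInput/finish/compress loop of the Compressor interface. This assumes the
+ native-lzo library is available and that the 64 KB output buffer is large
+ enough for the sample input; a real caller would drain the buffer on every
+ iteration.
+
+ import org.apache.hadoop.io.compress.lzo.LzoCompressor;
+
+ public class LzoCompressorExample {
+   public static void main(String[] args) throws Exception {
+     if (!LzoCompressor.isNativeLzoLoaded()) {
+       System.err.println("native-lzo is not loaded; nothing to do");
+       return;
+     }
+     byte[] input = "raw bytes to compress".getBytes("UTF-8");
+     byte[] output = new byte[64 * 1024];
+     LzoCompressor compressor = new LzoCompressor();  // default lzo1x_1
+     compressor.setInput(input, 0, input.length);
+     compressor.finish();
+     int total = 0;
+     while (!compressor.finished()) {
+       total += compressor.compress(output, total, output.length - total);
+     }
+     System.out.println(compressor.getBytesRead() + " bytes in, "
+         + compressor.getBytesWritten() + " bytes out");
+     compressor.end();
+   }
+ }
+ -->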
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <class name="LzoCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression algorithm for the lzo library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
+ <class name="LzoDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="LzoDecompressor" type="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.
+
+ @param strategy lzo decompression algorithm
+ @param directBufferSize size of the direct-buffer]]>
+ </doc>
+ </constructor>
+ <constructor name="LzoDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new lzo decompressor.]]>
+ </doc>
+ </constructor>
+ <method name="isNativeLzoLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if lzo decompressors are loaded and initialized.
+
+ @return <code>true</code> if lzo decompressors are loaded & initialized,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <field name="LZO_LIBRARY_VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the lzo algorithm.
+ http://www.oberhumer.com/opensource/lzo/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+ <class name="LzoDecompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy -->
+</package>
+<package name="org.apache.hadoop.io.compress.zlib">
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <class name="BuiltInZlibDeflater" extends="java.util.zip.Deflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="BuiltInZlibDeflater" type="int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Deflater to make it conform
+ to the org.apache.hadoop.io.compress.Compressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <class name="BuiltInZlibInflater" extends="java.util.zip.Inflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="BuiltInZlibInflater" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibInflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Inflater to make it conform
+ to the org.apache.hadoop.io.compress.Decompressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
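+ <!-- Usage sketch: a pure-Java round trip through BuiltInZlibDeflater and
+ BuiltInZlibInflater, which need no native zlib. setInput/finish/finished/end
+ come from java.util.zip.Deflater and Inflater; the 64 KB buffers and single
+ decompress call are assumptions that hold only for small inputs.
+
+ import org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater;
+ import org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater;
+
+ public class BuiltInZlibRoundTrip {
+   public static void main(String[] args) throws Exception {
+     byte[] input = "zlib round trip via the built-in wrappers".getBytes("UTF-8");
+     byte[] compressed = new byte[64 * 1024];
+     byte[] restored = new byte[64 * 1024];
+
+     // Compress with the pure-Java Deflater wrapper.
+     BuiltInZlibDeflater deflater = new BuiltInZlibDeflater();
+     deflater.setInput(input, 0, input.length);
+     deflater.finish();
+     int clen = 0;
+     while (!deflater.finished()) {
+       clen += deflater.compress(compressed, clen, compressed.length - clen);
+     }
+     deflater.end();
+
+     // Decompress with the matching Inflater wrapper.
+     BuiltInZlibInflater inflater = new BuiltInZlibInflater();
+     inflater.setInput(compressed, 0, clen);
+     int rlen = inflater.decompress(restored, 0, restored.length);
+     inflater.end();
+     System.out.println(new String(restored, 0, rlen, "UTF-8"));
+   }
+ }
+ -->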
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
+ <class name="ZlibCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="ZlibCompressor" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified compression level,
+ strategy, and header. Compressed data will be generated in ZLIB format.
+
+ @param level compression level (see {@link CompressionLevel})
+ @param strategy compression strategy (see {@link CompressionStrategy})
+ @param header compression header (see {@link CompressionHeader})
+ @param directBufferSize size of the direct buffer to be used.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default compression level.
+ Compressed data will be generated in ZLIB format.]]>
+ </doc>
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <class name="ZlibCompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The type of header for compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <class name="ZlibCompressor.CompressionLevel" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression level for the zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <class name="ZlibCompressor.CompressionStrategy" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[The compression strategy for the zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <class name="ZlibDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="ZlibDecompressor" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new decompressor.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes output so far.
+
+ @return the total (non-negative) number of uncompressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes input so far.
+
+ @return the total (non-negative) number of compressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <class name="ZlibDecompressor.CompressionHeader" extends="java.lang.Enum&lt;org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The headers to detect from compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
+ <class name="ZlibFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ZlibFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeZlibLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if native-zlib code is loaded & initialized correctly and
+ can be loaded for this job.
+
+ @param conf configuration
+ @return <code>true</code> if native-zlib is loaded & initialized
+ and can be loaded for this job, else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Compressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressorType" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.Decompressor&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of factories to create the right
+ zlib/gzip compressor/decompressor instances.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
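+ <!-- Usage sketch (illustrative, not part of the JDiff output): obtaining zlib
+      codec objects through the factory methods documented above. Only the
+      signatures listed in this file are assumed; the class and variable names
+      are hypothetical.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.compress.Compressor;
+      import org.apache.hadoop.io.compress.Decompressor;
+      import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+
+      public class ZlibFactoryExample {
+        public static void main(String[] args) {
+          Configuration conf = new Configuration();
+          // True only when the native zlib library was loaded successfully.
+          boolean nativeLoaded = ZlibFactory.isNativeZlibLoaded(conf);
+          // The factory hands back the appropriate implementation for this
+          // job: native-backed when available, pure-Java otherwise.
+          Compressor compressor = ZlibFactory.getZlibCompressor(conf);
+          Decompressor decompressor = ZlibFactory.getZlibDecompressor(conf);
+          System.out.println("native zlib loaded: " + nativeLoaded);
+        }
+      }
+ -->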
+</package>
+<package name="org.apache.hadoop.io.retry">
+ <!-- start class org.apache.hadoop.io.retry.RetryPolicies -->
+ <class name="RetryPolicies" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryPolicies"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="retryUpToMaximumCountWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumTimeWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxTime" type="long"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying for a maximum time, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumCountWithProportionalSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by the number of tries so far.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="exponentialBackoffRetry" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by a random
+ number in the range [0, 2^retries).
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByRemoteException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map&lt;java.lang.Class&lt;? extends java.lang.Exception&gt;, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ A retry policy for RemoteException.
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <field name="TRY_ONCE_THEN_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail by re-throwing the exception.
+ This corresponds to having no retry mechanism in place.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="TRY_ONCE_DONT_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail silently for <code>void</code> methods, or by
+ re-throwing the exception for non-<code>void</code> methods.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="RETRY_FOREVER" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Keep trying forever.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A collection of useful implementations of {@link RetryPolicy}.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryPolicies -->
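+ <!-- Usage sketch (illustrative): composing the factory methods above into a
+      policy that retries most failures but gives up immediately on
+      IOException. The counts and sleep times are arbitrary.
+
+      import java.io.IOException;
+      import java.util.HashMap;
+      import java.util.Map;
+      import java.util.concurrent.TimeUnit;
+      import org.apache.hadoop.io.retry.RetryPolicies;
+      import org.apache.hadoop.io.retry.RetryPolicy;
+
+      public class RetryPoliciesExample {
+        public static RetryPolicy buildPolicy() {
+          // Up to 5 attempts, sleeping 2 seconds between them.
+          RetryPolicy base =
+              RetryPolicies.retryUpToMaximumCountWithFixedSleep(5, 2, TimeUnit.SECONDS);
+          // Fail at once on IOException; fall back to the base policy otherwise.
+          Map<Class<? extends Exception>, RetryPolicy> byException =
+              new HashMap<Class<? extends Exception>, RetryPolicy>();
+          byException.put(IOException.class, RetryPolicies.TRY_ONCE_THEN_FAIL);
+          return RetryPolicies.retryByException(base, byException);
+        }
+      }
+ -->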
+ <!-- start interface org.apache.hadoop.io.retry.RetryPolicy -->
+ <interface name="RetryPolicy" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="shouldRetry" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Exception"/>
+ <param name="retries" type="int"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[<p>
+ Determines whether the framework should retry a
+ method for the given exception, and the number
+ of retries that have been made for that operation
+ so far.
+ </p>
+ @param e The exception that caused the method to fail.
+ @param retries The number of times the method has been retried.
+ @return <code>true</code> if the method should be retried,
+ <code>false</code> if the method should not be retried
+ but shouldn't fail with an exception (only for void methods).
+ @throws Exception The re-thrown exception <code>e</code> indicating
+ that the method failed and should not be retried further.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Specifies a policy for retrying method failures.
+ Implementations of this interface should be immutable.
+ </p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.retry.RetryPolicy -->
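+ <!-- Implementation sketch (illustrative): a custom immutable policy obeying
+      the shouldRetry contract documented above; the class name is hypothetical.
+
+      import org.apache.hadoop.io.retry.RetryPolicy;
+
+      public class LimitedRetryPolicy implements RetryPolicy {
+        private final int maxRetries;
+
+        public LimitedRetryPolicy(int maxRetries) {
+          this.maxRetries = maxRetries;
+        }
+
+        public boolean shouldRetry(Exception e, int retries) throws Exception {
+          if (retries < maxRetries) {
+            return true;  // ask the framework to retry the failed method
+          }
+          throw e;        // give up by re-throwing the original exception
+        }
+      }
+ -->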
+ <!-- start class org.apache.hadoop.io.retry.RetryProxy -->
+ <class name="RetryProxy" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryProxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using the same retry policy for each method in the interface.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param retryPolicy the policy for retrying method call failures
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class&lt;?&gt;"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="methodNameToPolicyMap" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.io.retry.RetryPolicy&gt;"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using a set of retry policies specified by method name.
+ If no retry policy is defined for a method then a default of
+ {@link RetryPolicies#TRY_ONCE_THEN_FAIL} is used.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param methodNameToPolicyMap a map of method names to retry policies
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for creating retry proxies.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryProxy -->
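+ <!-- Usage sketch (illustrative): wrapping an implementation so that every
+      method of its interface is retried under one policy. NameService and its
+      lookup method are hypothetical application types.
+
+      import java.util.concurrent.TimeUnit;
+      import org.apache.hadoop.io.retry.RetryPolicies;
+      import org.apache.hadoop.io.retry.RetryPolicy;
+      import org.apache.hadoop.io.retry.RetryProxy;
+
+      public class RetryProxyExample {
+        public interface NameService {
+          String lookup(String key) throws Exception;
+        }
+
+        public static NameService wrap(NameService impl) {
+          RetryPolicy policy =
+              RetryPolicies.retryUpToMaximumCountWithFixedSleep(3, 1, TimeUnit.SECONDS);
+          // The proxy implements NameService; failures of lookup() are
+          // retried according to the policy before being re-thrown.
+          return (NameService) RetryProxy.create(NameService.class, impl, policy);
+        }
+      }
+ -->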
+</package>
+<package name="org.apache.hadoop.io.serializer">
+ <!-- start interface org.apache.hadoop.io.serializer.Deserializer -->
+ <interface name="Deserializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the deserializer for reading.</p>]]>
+ </doc>
+ </method>
+ <method name="deserialize" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ Deserialize the next object from the underlying input stream.
+ If the object <code>t</code> is non-null then this deserializer
+ <i>may</i> set its internal state to the next object read from the input
+ stream. Otherwise, if the object <code>t</code> is null a new
+ deserialized object will be created.
+ </p>
+ @return the deserialized object]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying input stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for deserializing objects of type <T> from an
+ {@link InputStream}.
+ </p>
+
+ <p>
+ Deserializers are stateful, but must not buffer the input since
+ other consumers may read from the input between calls to
+ {@link #deserialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Deserializer -->
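+ <!-- Usage sketch (illustrative): the open/deserialize/close lifecycle of the
+      interface above, generic over any concrete Deserializer; the helper name
+      is hypothetical.
+
+      import java.io.IOException;
+      import java.io.InputStream;
+      import org.apache.hadoop.io.serializer.Deserializer;
+
+      public class DeserializerExample {
+        public static <T> T readOne(Deserializer<T> deserializer,
+                                    InputStream in, T reusable) throws IOException {
+          deserializer.open(in);
+          try {
+            // A non-null argument may be reused by the deserializer; null
+            // asks it to allocate a fresh instance.
+            return deserializer.deserialize(reusable);
+          } finally {
+            deserializer.close();
+          }
+        }
+      }
+ -->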
+ <!-- start class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <class name="DeserializerComparator" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator&lt;T&gt;"/>
+ <constructor name="DeserializerComparator" type="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link Deserializer} to deserialize
+ the objects to be compared so that the standard {@link Comparator} can
+ be used to compare them.
+ </p>
+ <p>
+ One may optimize compare-intensive operations by using a custom
+ implementation of {@link RawComparator} that operates directly
+ on byte representations.
+ </p>
+ @param <T>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <class name="JavaSerialization" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;java.io.Serializable&gt;"/>
+ <constructor name="JavaSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;java.io.Serializable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;java.io.Serializable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ An experimental {@link Serialization} for Java {@link Serializable} classes.
+ </p>
+ @see JavaSerializationComparator]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <class name="JavaSerializationComparator" extends="org.apache.hadoop.io.serializer.DeserializerComparator&lt;T&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JavaSerializationComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o1" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ <param name="o2" type="T extends java.io.Serializable &amp; java.lang.Comparable&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link JavaSerialization}
+ {@link Deserializer} to deserialize objects that are then compared via
+ their {@link Comparable} interfaces.
+ </p>
+ @param <T>
+ @see JavaSerialization]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <!-- start interface org.apache.hadoop.io.serializer.Serialization -->
+ <interface name="Serialization" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Allows clients to test whether this {@link Serialization}
+ supports the given class.]]>
+ </doc>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Serializer} for the given class.]]>
+ </doc>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <doc>
+ <![CDATA[@return a {@link Deserializer} for the given class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Encapsulates a {@link Serializer}/{@link Deserializer} pair.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serialization -->
+ <!-- start class org.apache.hadoop.io.serializer.SerializationFactory -->
+ <class name="SerializationFactory" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SerializationFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Serializations are found by reading the <code>io.serializations</code>
+ property from <code>conf</code>, which is a comma-delimited list of
+ classnames.
+ </p>]]>
+ </doc>
+ </constructor>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <method name="getSerialization" return="org.apache.hadoop.io.serializer.Serialization&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for {@link Serialization}s.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.SerializationFactory -->
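+ <!-- Usage sketch (illustrative): wiring the io.serializations property to
+      the factory. WritableSerialization (documented below) handles Writable
+      types such as Text; the class name is hypothetical.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.io.serializer.SerializationFactory;
+      import org.apache.hadoop.io.serializer.Serializer;
+
+      public class SerializationFactoryExample {
+        public static Serializer<Text> textSerializer() {
+          Configuration conf = new Configuration();
+          // A comma-delimited list of Serialization class names.
+          conf.set("io.serializations",
+              "org.apache.hadoop.io.serializer.WritableSerialization");
+          SerializationFactory factory = new SerializationFactory(conf);
+          return factory.getSerializer(Text.class);
+        }
+      }
+ -->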
+ <!-- start interface org.apache.hadoop.io.serializer.Serializer -->
+ <interface name="Serializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the serializer for writing.</p>]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Serialize <code>t</code> to the underlying output stream.</p>]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying output stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for serializing objects of type <T> to an
+ {@link OutputStream}.
+ </p>
+
+ <p>
+ Serializers are stateful, but must not buffer the output since
+ other producers may write to the output between calls to
+ {@link #serialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serializer -->
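+ <!-- Usage sketch (illustrative): the open/serialize/close lifecycle above,
+      collecting the output in a byte array; the helper name is hypothetical.
+
+      import java.io.ByteArrayOutputStream;
+      import java.io.IOException;
+      import org.apache.hadoop.io.serializer.Serializer;
+
+      public class SerializerExample {
+        public static <T> byte[] toBytes(Serializer<T> serializer, T value)
+            throws IOException {
+          ByteArrayOutputStream out = new ByteArrayOutputStream();
+          serializer.open(out);
+          try {
+            serializer.serialize(value);  // writes to the underlying stream
+          } finally {
+            serializer.close();
+          }
+          return out.toByteArray();
+        }
+      }
+ -->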
+ <!-- start class org.apache.hadoop.io.serializer.WritableSerialization -->
+ <class name="WritableSerialization" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="WritableSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;?&gt;"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;org.apache.hadoop.io.Writable&gt;"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Serialization} for {@link Writable}s that delegates to
+ {@link Writable#write(java.io.DataOutput)} and
+ {@link Writable#readFields(java.io.DataInput)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.WritableSerialization -->
+</package>
+<package name="org.apache.hadoop.ipc">
+ <!-- start class org.apache.hadoop.ipc.Client -->
+ <class name="Client" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Client" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, org.apache.hadoop.conf.Configuration, javax.net.SocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client whose values are of the given {@link Writable}
+ class.]]>
+ </doc>
+ </constructor>
+ <constructor name="Client" type="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client with the default SocketFactory
+ @param valueClass
+ @param conf]]>
+ </doc>
+ </constructor>
+ <method name="setPingInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="pingInterval" type="int"/>
+ <doc>
+ <![CDATA[Set the ping interval value in the configuration.
+
+ @param conf Configuration
+ @param pingInterval the ping interval]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all threads related to this client. No further calls may be made
+ using this client.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a call, passing <code>param</code>, to the IPC server running at
+ <code>address</code>, returning the value. Throws exceptions if there are
+ network problems or if the remote code threw an exception.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="params" type="org.apache.hadoop.io.Writable[]"/>
+ <param name="addresses" type="java.net.InetSocketAddress[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Makes a set of calls in parallel. Each parameter is sent to the
+ corresponding address. When all values are available, or have timed out
+ or errored, the collected results are returned in an array. The array
+ contains nulls for calls that timed out or errored.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A client for an IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Server]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Client -->
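+ <!-- Usage sketch (illustrative): a single IPC round trip with the Client
+      class above. The parameter and value classes must match what the server
+      expects; the host, port, and helper name are hypothetical.
+
+      import java.net.InetSocketAddress;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.Writable;
+      import org.apache.hadoop.ipc.Client;
+
+      public class IpcClientExample {
+        public static Writable callOnce(Class<? extends Writable> valueClass,
+                                        Writable param, String host, int port)
+            throws Exception {
+          Client client = new Client(valueClass, new Configuration());
+          try {
+            return client.call(param, new InetSocketAddress(host, port));
+          } finally {
+            client.stop();  // no further calls may be made after stop()
+          }
+        }
+      }
+ -->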
+ <!-- start class org.apache.hadoop.ipc.RemoteException -->
+ <class name="RemoteException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RemoteException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lookupTypes" type="java.lang.Class[]"/>
+ <doc>
+ <![CDATA[If this remote exception wraps up one of the lookupTypes
+ then return this exception.
+ <p>
+ Unwraps any IOException.
+
+ @param lookupTypes the desired exception class.
+ @return IOException, which is either the lookupClass exception or this.]]>
+ </doc>
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Instantiate and return the exception wrapped up by this remote exception.
+
+ <p> This unwraps any <code>Throwable</code> that has a constructor taking
+ a <code>String</code> as a parameter.
+ Otherwise it returns this.
+
+ @return <code>Throwable</code>]]>
+ </doc>
+ </method>
+ <method name="writeXml"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the object to XML format]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.ipc.RemoteException"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attrs" type="org.xml.sax.Attributes"/>
+ <doc>
+ <![CDATA[Create RemoteException from attributes]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RemoteException -->
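+ <!-- Usage sketch (illustrative): recovering the server-side exception type
+      from a RemoteException, per the unwrapRemoteException contract above.
+      FileNotFoundException stands in for any lookup type.
+
+      import java.io.FileNotFoundException;
+      import java.io.IOException;
+      import org.apache.hadoop.ipc.RemoteException;
+
+      public class RemoteExceptionExample {
+        public static IOException unwrap(IOException e) {
+          if (e instanceof RemoteException) {
+            // Returns a FileNotFoundException when that is what the remote
+            // side threw; otherwise returns the RemoteException itself.
+            return ((RemoteException) e).unwrapRemoteException(
+                new Class<?>[] { FileNotFoundException.class });
+          }
+          return e;
+        }
+      }
+ -->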
+ <!-- start class org.apache.hadoop.ipc.RPC -->
+ <class name="RPC" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="waitForProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class&lt;?&gt;"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object with the default SocketFactory
+
+ @param protocol
+ @param clientVersion
+ @param addr
+ @param conf
+ @return a proxy instance
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopProxy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="proxy" type="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <doc>
+ <![CDATA[Stop this proxy and release its invoker's resources.
+ @param proxy the proxy to be stopped]]>
+ </doc>
+ </method>
+ <method name="call" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="method" type="java.lang.reflect.Method"/>
+ <param name="params" type="java.lang.Object[][]"/>
+ <param name="addrs" type="java.net.InetSocketAddress[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Expert: Make multiple, parallel calls to a set of servers.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="numHandlers" type="int"/>
+ <param name="verbose" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple RPC mechanism.
+
+ A <i>protocol</i> is a Java interface. All parameters and return types must
+ be one of:
+
+ <ul> <li>a primitive type, <code>boolean</code>, <code>byte</code>,
+ <code>char</code>, <code>short</code>, <code>int</code>, <code>long</code>,
+ <code>float</code>, <code>double</code>, or <code>void</code>; or</li>
+
+ <li>a {@link String}; or</li>
+
+ <li>a {@link Writable}; or</li>
+
+ <li>an array of the above types</li> </ul>
+
+ All methods in the protocol should throw only IOException. No field data of
+ the protocol instance is transmitted.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC -->
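+ <!-- Usage sketch (illustrative): the client side of the RPC mechanism above.
+      PingProtocol is a hypothetical protocol interface; real protocols extend
+      VersionedProtocol and declare a versionID (see below).
+
+      import java.io.IOException;
+      import java.net.InetSocketAddress;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.ipc.RPC;
+      import org.apache.hadoop.ipc.VersionedProtocol;
+
+      public class RpcProxyExample {
+        public interface PingProtocol extends VersionedProtocol {
+          long versionID = 1L;
+          String ping() throws IOException;
+        }
+
+        public static String pingServer(String host, int port) throws IOException {
+          PingProtocol proxy = (PingProtocol) RPC.getProxy(
+              PingProtocol.class, PingProtocol.versionID,
+              new InetSocketAddress(host, port), new Configuration());
+          try {
+            return proxy.ping();
+          } finally {
+            RPC.stopProxy(proxy);  // release the invoker's resources
+          }
+        }
+      }
+ -->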
+ <!-- start class org.apache.hadoop.ipc.RPC.Server -->
+ <class name="RPC.Server" extends="org.apache.hadoop.ipc.Server"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on]]>
+ </doc>
+ </constructor>
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind on to listen for connection
+ @param port the port to listen for connections on
+ @param numHandlers the number of method handler threads to run
+ @param verbose whether each call should be logged]]>
+ </doc>
+ </constructor>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receivedTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An RPC Server.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.Server -->
+ <!-- start class org.apache.hadoop.ipc.RPC.VersionMismatch -->
+ <class name="RPC.VersionMismatch" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.VersionMismatch" type="java.lang.String, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a version mismatch exception
+ @param interfaceName the name of the protocol mismatch
+ @param clientVersion the client's version of the protocol
+ @param serverVersion the server's version of the protocol]]>
+ </doc>
+ </constructor>
+ <method name="getInterfaceName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the interface name.
+ @return the java class name
+ (e.g. org.apache.hadoop.mapred.InterTrackerProtocol)]]>
+ </doc>
+ </method>
+ <method name="getClientVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the client's preferred version]]>
+ </doc>
+ </method>
+ <method name="getServerVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the version that the server agreed to speak.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A version mismatch for the RPC protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.VersionMismatch -->
+ <!-- start class org.apache.hadoop.ipc.Server -->
+ <class name="Server" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, int, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;, int, org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a server listening on the named port and address. Parameters passed must
+ be of the named class. The <code>handlerCount</code> determines
+ the number of handler threads that will be used to process calls.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.ipc.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the server instance called under or null. May be called under
+ {@link #call(Writable, long)} implementations, and under {@link Writable}
+ methods of parameters and return values. Permits applications to access
+ the server context.]]>
+ </doc>
+ </method>
+ <method name="getRemoteIp" return="java.net.InetAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the remote side's IP address when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="getRemoteAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns remote address as a string when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="bind"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.ServerSocket"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <param name="backlog" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A convenience method to bind to a given address and report
+ better exceptions if the address is not a valid host.
+ @param socket the socket to bind
+ @param address the address to bind to
+ @param backlog the number of connections allowed in the queue
+ @throws BindException if the address can't be bound
+ @throws UnknownHostException if the address isn't a valid host name
+ @throws IOException other random errors from bind]]>
+ </doc>
+ </method>
+ <method name="setSocketSendBufSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Sets the socket buffer size used for responding to RPCs]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts the service. Must be called before any calls will be handled.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops the service. No new calls will be handled after this is called.]]>
+ </doc>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Wait for the server to be stopped.
+ Does not wait for all subthreads to finish.
+ See {@link #stop()}.]]>
+ </doc>
+ </method>
+ <method name="getListenerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the socket (ip+port) on which the RPC server is listening.
+ @return the socket (ip+port) on which the RPC server is listening.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receiveTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called for each call.]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of open RPC connections.
+ @return the number of open rpc connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <field name="HEADER" type="java.nio.ByteBuffer"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The first four bytes of Hadoop RPC connections]]>
+ </doc>
+ </field>
+ <field name="CURRENT_VERSION" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rpcMetrics" type="org.apache.hadoop.ipc.metrics.RpcMetrics"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Client]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Server -->
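+ <!-- Implementation sketch (illustrative): a minimal subclass of the abstract
+      Server above that echoes each Text parameter back. The class name,
+      handler count, and bind parameters are hypothetical.
+
+      import java.io.IOException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.io.Writable;
+      import org.apache.hadoop.ipc.Server;
+
+      public class EchoServer extends Server {
+        public EchoServer(String bindAddress, int port, Configuration conf)
+            throws IOException {
+          super(bindAddress, port, Text.class, 2, conf);  // 2 handler threads
+        }
+
+        // Invoked once per incoming call.
+        public Writable call(Writable param, long receiveTime) throws IOException {
+          return param;
+        }
+      }
+
+      A caller would then invoke start() to begin handling calls and stop()
+      to shut the service down, as documented above.
+ -->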
+ <!-- start interface org.apache.hadoop.ipc.VersionedProtocol -->
+ <interface name="VersionedProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return protocol version corresponding to protocol interface.
+ @param protocol The classname of the protocol interface
+ @param clientVersion The version of the protocol that the client speaks
+ @return the version that the server will speak]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Superclass of all protocols that use Hadoop RPC.
+ Subclasses of this interface are also supposed to have
+ a static final long versionID field.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.VersionedProtocol -->
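+ <!-- Implementation sketch (illustrative): the versionID convention noted
+      above, with a hypothetical ClockProtocol and a server-side implementation.
+
+      import java.io.IOException;
+      import org.apache.hadoop.ipc.VersionedProtocol;
+
+      public interface ClockProtocol extends VersionedProtocol {
+        long versionID = 1L;  // the conventional static final version field
+        long now() throws IOException;
+      }
+
+      class ClockImpl implements ClockProtocol {
+        public long now() {
+          return System.currentTimeMillis();
+        }
+        public long getProtocolVersion(String protocol, long clientVersion) {
+          return versionID;  // the version the server will speak
+        }
+      }
+
+      An instance of ClockImpl could then be exported with RPC.getServer
+      (documented in the org.apache.hadoop.ipc package above).
+ -->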
+</package>
+<package name="org.apache.hadoop.ipc.metrics">
+ <!-- start class org.apache.hadoop.ipc.metrics.RpcMetrics -->
+ <class name="RpcMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="RpcMetrics" type="java.lang.String, java.lang.String, org.apache.hadoop.ipc.Server"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Push the metrics to the monitoring subsystem on each doUpdates() call.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="rpcQueueTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The metrics variables are public:
+ - they can be set directly by calling their set/inc methods;
+ - they can also be read directly, e.g. JMX does this.]]>
+ </doc>
+ </field>
+ <field name="rpcProcessingTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="metricsList" type="java.util.Map&lt;java.lang.String, org.apache.hadoop.metrics.util.MetricsTimeVaryingRate&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various RPC statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #rpcQueueTime}.inc(time)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.metrics.RpcMetrics -->
+ <!-- start interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+ <interface name="RpcMgtMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRpcOpsNumber" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of RPC Operations in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for RPC Operations in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Average RPC Operation Queued Time in the last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all min max times]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of open RPC connections.
+ @return the number of open rpc connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for the RPC layer.
+ Many of the statistics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the statistics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most do.
+ The default Null metrics context, however, does NOT. So if you aren't
+ using any other metrics context, you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ rpc.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
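+ <!-- A minimal, illustrative sketch of reading this MBean through the
+      platform MBean server. The ObjectName below is an assumption made
+      for the example, and the attribute names are derived from the
+      getters above by the usual JMX convention of dropping "get";
+      neither is taken from the Hadoop source.
+
+      import java.lang.management.ManagementFactory;
+      import javax.management.MBeanServer;
+      import javax.management.ObjectName;
+
+      public class RpcMetricsProbe {
+        public static void main(String[] args) throws Exception {
+          MBeanServer server = ManagementFactory.getPlatformMBeanServer();
+          // Hypothetical name; the real one is registered by the daemon's RPC layer.
+          ObjectName rpc = new ObjectName("hadoop:service=NameNode,name=RpcStatistics");
+          System.out.println("ops in last interval: "
+              + server.getAttribute(rpc, "RpcOpsNumber"));
+          System.out.println("avg processing time (ms): "
+              + server.getAttribute(rpc, "RpcOpsAvgProcessingTime"));
+          // Invoke the no-argument reset operation declared above.
+          server.invoke(rpc, "resetAllMinMax", new Object[0], new String[0]);
+        }
+      }
+ -->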
+</package>
+<package name="org.apache.hadoop.log">
+ <!-- start class org.apache.hadoop.log.LogLevel -->
+ <class name="LogLevel" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[A command line implementation]]>
+ </doc>
+ </method>
+ <field name="USAGES" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Change the log level at runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel -->
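+ <!-- A usage sketch for changing a daemon's log level from the command
+      line via this class. The flag names and the host:port endpoint are
+      assumptions for illustration; the USAGES field above holds the
+      authoritative syntax.
+
+      import org.apache.hadoop.log.LogLevel;
+
+      public class SetLogLevelExample {
+        public static void main(String[] args) throws Exception {
+          // Assumed flags and endpoint, shown for illustration only.
+          LogLevel.main(new String[] {
+              "-setlevel", "namenode.example.com:50070",
+              "org.apache.hadoop.mapred", "DEBUG" });
+        }
+      }
+ -->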
+ <!-- start class org.apache.hadoop.log.LogLevel.Servlet -->
+ <class name="LogLevel.Servlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel.Servlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A servlet implementation]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel.Servlet -->
+</package>
+<package name="org.apache.hadoop.mapred">
+ <!-- start class org.apache.hadoop.mapred.ClusterStatus -->
+ <class name="ClusterStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of task trackers in the cluster.
+
+ @return the number of task trackers in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running map tasks in the cluster.
+
+ @return the number of currently running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running reduce tasks in the cluster.
+
+ @return the number of currently running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running map tasks in the cluster.
+
+ @return the maximum capacity for running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running reduce tasks in the cluster.
+
+ @return the maximum capacity for running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getJobTrackerState" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current state of the <code>JobTracker</code>,
+ as {@link JobTracker.State}
+
+ @return the current state of the <code>JobTracker</code>.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Status information on the current state of the Map-Reduce cluster.
+
+ <p><code>ClusterStatus</code> provides clients with information such as:
+ <ol>
+ <li>
+ Size of the cluster.
+ </li>
+ <li>
+ Task capacity of the cluster.
+ </li>
+ <li>
+ The number of currently running map & reduce tasks.
+ </li>
+ <li>
+ State of the <code>JobTracker</code>.
+ </li>
+ </ol></p>
+
+ <p>Clients can query for the latest <code>ClusterStatus</code>, via
+ {@link JobClient#getClusterStatus()}.</p>
+
+ @see JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ClusterStatus -->
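+ <!-- A short sketch of querying the cluster for the information listed
+      above via JobClient#getClusterStatus(), as the class doc suggests.
+      Assumes a reachable JobTracker in the default configuration.
+
+      import org.apache.hadoop.mapred.ClusterStatus;
+      import org.apache.hadoop.mapred.JobClient;
+      import org.apache.hadoop.mapred.JobConf;
+
+      public class ClusterStatusExample {
+        public static void main(String[] args) throws Exception {
+          JobClient client = new JobClient(new JobConf());
+          ClusterStatus status = client.getClusterStatus();
+          System.out.println("task trackers: " + status.getTaskTrackers());
+          System.out.println("maps: " + status.getMapTasks()
+              + " of " + status.getMaxMapTasks());
+          System.out.println("reduces: " + status.getReduceTasks()
+              + " of " + status.getMaxReduceTasks());
+        }
+      }
+ -->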
+ <!-- start class org.apache.hadoop.mapred.Counters -->
+ <class name="Counters" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Group&gt;"/>
+ <constructor name="Counters"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getGroupNames" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all counter classes.
+ @return Set of counter names.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Group&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getGroup" return="org.apache.hadoop.mapred.Counters.Group"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="groupName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the named counter group, or an empty group if there is none
+ with the specified name.]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Find the counter for the given enum. The same enum will always return the
+ same counter.
+ @param key the counter key
+ @return the matching counter object]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Find a counter given the group and the name.
+ @param group the name of the group
+ @param name the internal name of the counter
+ @return the counter for that name]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="group" type="java.lang.String"/>
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Find a counter by using strings
+ @param group the name of the group
+ @param id the id of the counter within the group (0 to N-1)
+ @param name the internal name of the counter
+ @return the counter for that name
+ @deprecated]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param key identifies a counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param group the name of the group
+ @param counter the internal name of the counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Returns current value of the specified counter, or 0 if the counter
+ does not exist.]]>
+ </doc>
+ </method>
+ <method name="incrAllCounters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+ </doc>
+ </method>
+ <method name="sum" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.mapred.Counters"/>
+ <param name="b" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Convenience method for computing the sum of two sets of counters.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of counters, by summing the number of counters
+ in each group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the set of groups.
+ The external format is:
+ #groups (groupName group)*
+
+ i.e. the number of groups followed by 0 or more groups, where each
+ group is of the form:
+
+ groupDisplayName #counters (false | true counter)*
+
+ where each counter is of the form:
+
+ name (false | true displayName) value]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a set of groups.]]>
+ </doc>
+ </method>
+ <method name="log"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Logs the current counter values.
+ @param log The log to use.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return textual representation of the counter values.]]>
+ </doc>
+ </method>
+ <method name="makeCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert a counters object into a single line that is easy to parse.
+ @return the string with "name=value" for each counter and separated by ","]]>
+ </doc>
+ </method>
+ <method name="makeEscapedCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Represent the counter in a textual format that can be converted back to
+ its object form
+ @return the string in the following format
+ {(groupname)(group-displayname)[(countername)(displayname)(value)][][]}{}{}]]>
+ </doc>
+ </method>
+ <method name="fromEscapedCompactString" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compactString" type="java.lang.String"/>
+ <exception name="ParseException" type="java.text.ParseException"/>
+ <doc>
+ <![CDATA[Convert a stringified counter representation into a counter object. Note
+ that the counter can be recovered if it was stringified using
+ {@link #makeEscapedCompactString()}.
+ @return a Counters object]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A set of named counters.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> can be of
+ any {@link Enum} type.</p>
+
+ <p><code>Counters</code> are bunched into {@link Group}s, each comprising
+ counters from a particular <code>Enum</code> class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters -->
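+ <!-- An illustrative sketch of the enum-keyed counter pattern described
+      above. The enum, the job setup, and the counter names are
+      hypothetical; a real job would also configure input, output, and a
+      mapper that calls Reporter#incrCounter with the same enum.
+
+      import org.apache.hadoop.mapred.Counters;
+      import org.apache.hadoop.mapred.JobClient;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.RunningJob;
+
+      public class CounterExample {
+        // Each constant becomes a Counter in a Group named after the enum class.
+        public enum Records { PARSED, MALFORMED }
+
+        public static void main(String[] args) throws Exception {
+          JobConf conf = new JobConf(CounterExample.class);
+          // ... input/output paths, mapper, and reducer elided ...
+          RunningJob job = JobClient.runJob(conf); // blocks until the job completes
+          Counters counters = job.getCounters();
+          System.out.println("parsed: " + counters.getCounter(Records.PARSED));
+        }
+      }
+ -->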
+ <!-- start class org.apache.hadoop.mapred.Counters.Counter -->
+ <class name="Counters.Counter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the internal name of the counter.
+ @return the internal name of the counter]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the counter.
+ @return the user facing name of the counter]]>
+ </doc>
+ </method>
+ <method name="setDisplayName"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="displayName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the display name of the counter.]]>
+ </doc>
+ </method>
+ <method name="makeEscapedCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compact stringified version of the counter in the format
+ [(actual-name)(display-name)(value)]]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[What is the current value of this counter?
+ @return the current value]]>
+ </doc>
+ </method>
+ <method name="increment"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Increment this counter by the given value
+ @param incr the value to increase this counter by]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A counter record, comprising its name and value.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Counter -->
+ <!-- start class org.apache.hadoop.mapred.Counters.Group -->
+ <class name="Counters.Group" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"/>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns raw name of the group. This is the name of the enum class
+ for this group of counters.]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns localized name of the group. This is the same as getName() by
+ default, but different if an appropriate ResourceBundle is found.]]>
+ </doc>
+ </method>
+ <method name="setDisplayName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="displayName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the display name]]>
+ </doc>
+ </method>
+ <method name="makeEscapedCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compact stringified version of the group in the format
+ {(actual-name)(display-name)(value)[][][]} where [] are compact strings for the
+ counters within.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="counterName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the specified counter, or 0 if the counter does
+ not exist.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getCounter(String)} instead">
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given id and create it if it doesn't exist.
+ @param id the numeric id of the counter within the group
+ @param name the internal counter name
+ @return the counter
+ @deprecated use {@link #getCounter(String)} instead]]>
+ </doc>
+ </method>
+ <method name="getCounterForName" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given name and create it if it doesn't exist.
+ @param name the internal counter name
+ @return the counter]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of counters in this group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.mapred.Counters.Counter&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[<code>Group</code> of counters, comprising counters from a particular
+ counter {@link Enum} class.
+
+ <p><code>Group</code> handles localization of the class name and the
+ counter names.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Group -->
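+ <!-- A small sketch of walking all groups and counters; it relies only
+      on the Iterable implementations declared above (Counters over
+      Group, and Group over Counters.Counter).
+
+      import org.apache.hadoop.mapred.Counters;
+
+      public class CounterDump {
+        public static void dump(Counters counters) {
+          for (Counters.Group group : counters) {
+            for (Counters.Counter counter : group) {
+              System.out.println(group.getDisplayName() + "."
+                  + counter.getDisplayName() + " = " + counter.getCounter());
+            }
+          }
+        }
+      }
+ -->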
+ <!-- start class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <class name="DefaultJobHistoryParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DefaultJobHistoryParser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseJobTasks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobHistoryFile" type="java.lang.String"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobHistory.JobInfo"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Populates a JobInfo object from the job's history log file.
+ @param jobHistoryFile history file for this job.
+ @param job a precreated JobInfo object, should be non-null.
+ @param fs FileSystem where historyFile is present.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Default parser for job history files. It creates an object model from
+ the job history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <!-- start class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <class name="FileAlreadyExistsException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileAlreadyExistsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileAlreadyExistsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Used when target file already exists for any operation and
+ is not configured to be overwritten.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <!-- start class org.apache.hadoop.mapred.FileInputFormat -->
+ <class name="FileInputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <constructor name="FileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setMinSplitSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="minSplitSize" type="long"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="filename" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Is the given filename splitable? Usually true, but if the file is
+ stream compressed, it will not be.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split-up
+ so that {@link Mapper}s process entire files.
+
+ @param fs the file system that the file is on
+ @param filename the file name to check
+ @return is this file splitable?]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setInputPathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="filter" type="java.lang.Class&lt;? extends org.apache.hadoop.fs.PathFilter&gt;"/>
+ <doc>
+ <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+
+ @param filter the PathFilter class to use for filtering the input paths.]]>
+ </doc>
+ </method>
+ <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get a PathFilter instance of the filter set for the input paths.
+
+ @return the PathFilter instance set for the job, NULL if none has been set.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression.
+
+ @param job the job to list input paths for
+ @return array of FileStatus objects
+ @throws IOException if zero items are found.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Splits files returned by {@link #listStatus(JobConf)} when
+ they're too big.]]>
+ </doc>
+ </method>
+ <method name="computeSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="goalSize" type="long"/>
+ <param name="minSize" type="long"/>
+ <param name="blockSize" type="long"/>
+ </method>
+ <method name="getBlockIndex" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+ <param name="offset" type="long"/>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the given comma separated paths as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be set as
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add the given comma separated paths to the list of inputs for
+ the map-reduce job.
+
+ @param conf The configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be added to
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job.
+ @param inputPaths the {@link Path}s of the input directories/files
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @param conf The configuration of the job
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class for file-based {@link InputFormat}.
+
+ <p><code>FileInputFormat</code> is the base class for all file-based
+ <code>InputFormat</code>s. This provides a generic implementation of
+ {@link #getSplits(JobConf, int)}.
+ Subclasses of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(FileSystem, Path)} method to ensure input-files are
+ not split-up and are processed as a whole by {@link Mapper}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileInputFormat -->
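+ <!-- A sketch of the isSplitable override suggested by the class doc,
+      so that each input file is processed whole by a single Mapper.
+      Extending TextInputFormat and the input path are illustrative
+      choices, not requirements.
+
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.FileInputFormat;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.TextInputFormat;
+
+      public class WholeFileTextInputFormat extends TextInputFormat {
+        @Override
+        protected boolean isSplitable(FileSystem fs, Path file) {
+          return false; // never split, regardless of size or compression
+        }
+
+        public static void configure(JobConf conf) {
+          conf.setInputFormat(WholeFileTextInputFormat.class);
+          FileInputFormat.setInputPaths(conf, new Path("/data/in"));
+        }
+      }
+ -->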
+ <!-- start class org.apache.hadoop.mapred.FileOutputCommitter -->
+ <class name="FileOutputCommitter" extends="org.apache.hadoop.mapred.OutputCommitter"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileOutputCommitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setupJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cleanupJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setupTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="commitTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="abortTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ </method>
+ <method name="needsTaskCommit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TEMP_DIR_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Temporary directory name]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An {@link OutputCommitter} that commits files specified
+ in the job output directory, i.e. ${mapred.output.dir}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileOutputCommitter -->
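+ <!-- A one-line configuration sketch. FileOutputCommitter is typically
+      the default committer for file-based output, so setting it
+      explicitly, as here, is shown purely for illustration.
+
+      import org.apache.hadoop.mapred.FileOutputCommitter;
+      import org.apache.hadoop.mapred.JobConf;
+
+      public class CommitterSetup {
+        public static void configure(JobConf conf) {
+          conf.setOutputCommitter(FileOutputCommitter.class);
+        }
+      }
+ -->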
+ <!-- start class org.apache.hadoop.mapred.FileOutputFormat -->
+ <class name="FileOutputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="FileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param conf The configuration of the job.
+ @param outputDir the {@link Path} of the output directory for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(JobConf)]]>
+ </doc>
+ </method>
+ <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the task's temporary output directory
+ for the map-reduce job
+
+ <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
+
+ <p><i>Note:</i> The following is valid only if the {@link OutputCommitter}
+ is {@link FileOutputCommitter}. If <code>OutputCommitter</code> is not
+ a <code>FileOutputCommitter</code>, the task's temporary output
+ directory is same as {@link #getOutputPath(JobConf)} i.e.
+ <tt>${mapred.output.dir}</tt></p>
+
+ <p>Some applications need to create/write-to side-files, which differ from
+ the actual job-outputs.
+
+ <p>In such cases there could be issues with two instances of the same TIP
+ (running simultaneously, e.g. speculative tasks) trying to open/write-to the
+ same file (path) on HDFS. Hence the application-writer will have to pick
+ unique names per task-attempt (e.g. using the attemptid, say
+ <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
+
+ <p>To get around this the Map-Reduce framework helps the application-writer
+ out by maintaining a special
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>
+ sub-directory for each task-attempt on HDFS where the output of the
+ task-attempt goes. On successful completion of the task-attempt the files
+ in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only)
+ are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the
+ framework discards the sub-directory of unsuccessful task-attempts. This
+ is completely transparent to the application.</p>
+
+ <p>The application-writer can take advantage of this by creating any
+ side-files required in <tt>${mapred.work.output.dir}</tt> during execution
+ of a reduce-task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the
+ framework will move them out similarly; thus the application-writer does
+ not have to pick unique paths per task-attempt.</p>
+
+ <p><i>Note</i>: the value of <tt>${mapred.work.output.dir}</tt> during
+ execution of a particular task-attempt is actually
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>, and this value is
+ set by the map-reduce framework. So, just create any side-files in the
+ path returned by {@link #getWorkOutputPath(JobConf)} from map/reduce
+ task to take advantage of this feature.</p>
+
+ <p>The entire discussion holds true for maps of jobs with
+ reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
+ goes directly to HDFS.</p>
+
+ @return the {@link Path} to the task's temporary output directory
+ for the map-reduce job.]]>
+ </doc>
+ </method>
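+ <!-- A sketch of the side-file pattern described above, meant to run
+      inside a task. The file name is illustrative; anything created
+      under the work output path is promoted to ${mapred.output.dir}
+      when the task-attempt commits.
+
+      import java.io.IOException;
+      import org.apache.hadoop.fs.FSDataOutputStream;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.FileOutputFormat;
+      import org.apache.hadoop.mapred.JobConf;
+
+      public class SideFiles {
+        public static void writeSideFile(JobConf job) throws IOException {
+          Path workDir = FileOutputFormat.getWorkOutputPath(job);
+          Path sideFile = new Path(workDir, "side-data.txt"); // hypothetical name
+          FSDataOutputStream out = sideFile.getFileSystem(job).create(sideFile);
+          out.writeBytes("side-effect output\n");
+          out.close();
+        }
+      }
+ -->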
+ <method name="getTaskOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to create the task's temporary output directory and
+ return the path to the task's output file.
+
+ @param conf job-configuration
+ @param name temporary task-output filename
+ @return path to the task's temporary output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUniqueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Helper function to generate a name that is unique for the task.
+
+ <p>The generated name can be used to create custom files from within the
+ different tasks for the job; the names for different tasks will not collide
+ with each other.</p>
+
+ <p>The given name is postfixed with the task type, 'm' for maps, 'r' for
+ reduces, and the task partition number. For example, given the name 'test'
+ running on the first map of the job, the generated name will be
+ 'test-m-00000'.</p>
+
+ @param conf the configuration for the job.
+ @param name the name to make unique.
+ @return a unique name across all tasks of the job.]]>
+ </doc>
+ </method>
+ <method name="getPathForCustomFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Helper function to generate a {@link Path} for a file that is unique for
+ the task within the job output directory.
+
+ <p>The path can be used to create custom files from within the map and
+ reduce tasks. The path name will be unique for each task. The path parent
+ will be the job output directory.</p>
+
+ <p>This method uses the {@link #getUniqueName} method to make the file name
+ unique for the task.</p>
+
+ @param conf the configuration for the job.
+ @param name the name for the file.
+ @return a unique path across all tasks of the job.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base class for {@link OutputFormat}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileOutputFormat -->
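+ <!-- A configuration sketch pulling together the output-path and
+      compression setters above. The output path and the choice of
+      GzipCodec are illustrative.
+
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.io.compress.GzipCodec;
+      import org.apache.hadoop.mapred.FileOutputFormat;
+      import org.apache.hadoop.mapred.JobConf;
+
+      public class CompressedOutputSetup {
+        public static void configure(JobConf conf) {
+          FileOutputFormat.setOutputPath(conf, new Path("/data/out"));
+          FileOutputFormat.setCompressOutput(conf, true);
+          FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);
+        }
+      }
+ -->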
+ <!-- start class org.apache.hadoop.mapred.FileSplit -->
+ <class name="FileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[Constructs a split.
+ @deprecated
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process]]>
+ </doc>
+ </constructor>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+ </doc>
+ </constructor>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file containing this split's data.]]>
+ </doc>
+ </method>
+ <method name="getStart" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The position of the first byte in the file to process.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the file to process.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A section of an input file. Returned by {@link
+ InputFormat#getSplits(JobConf, int)} and passed to
+ {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileSplit -->
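+ <!-- A sketch of constructing a split with host information via the
+      non-deprecated constructor above; the path, length, and host names
+      are illustrative values.
+
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.FileSplit;
+
+      public class SplitExample {
+        public static FileSplit firstMegabyte() {
+          // Covers bytes [0, 1 MB) of the file; hosts hint at block locality.
+          return new FileSplit(new Path("/data/in/part-00000"), 0L, 1024L * 1024L,
+              new String[] { "host1.example.com", "host2.example.com" });
+        }
+      }
+ -->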
+ <!-- start class org.apache.hadoop.mapred.ID -->
+ <class name="ID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable&lt;org.apache.hadoop.mapred.ID&gt;"/>
+ <constructor name="ID" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructs an ID object from the given int]]>
+ </doc>
+ </constructor>
+ <constructor name="ID"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[returns the int which represents the identifier]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare IDs by associated numbers]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.ID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.ID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct an ID object from the given string
+
+ @return constructed Id object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A general identifier, which internally stores the id
+ as an integer. This is the super class of {@link JobID},
+ {@link TaskID} and {@link TaskAttemptID}.
+
+ @see JobID
+ @see TaskID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ID -->
+ <!-- start interface org.apache.hadoop.mapred.InputFormat -->
+ <interface name="InputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically split the set of input files for the job.
+
+ <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
+ for processing.</p>
+
+ <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
+ input files are not physically split into chunks. For example, a split could
+ be an <i>&lt;input-file-path, start, offset&gt;</i> tuple.
+
+ @param job job configuration.
+ @param numSplits the desired number of splits, a hint.
+ @return an array of {@link InputSplit}s for the job.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordReader} for the given {@link InputSplit}.
+
+ <p>It is the responsibility of the <code>RecordReader</code> to respect
+ record boundaries while processing the logical split to present a
+ record-oriented view to the individual task.</p>
+
+ @param split the {@link InputSplit}
+ @param job the job that this split belongs to
+ @return a {@link RecordReader}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputFormat</code> describes the input-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the input-specification of the job.
+ </li>
+ <li>
+ Split-up the input file(s) into logical {@link InputSplit}s, each of
+ which is then assigned to an individual {@link Mapper}.
+ </li>
+ <li>
+ Provide the {@link RecordReader} implementation to be used to glean
+ input records from the logical <code>InputSplit</code> for processing by
+ the {@link Mapper}.
+ </li>
+ </ol>
+
+ <p>The default behavior of file-based {@link InputFormat}s, typically
+ sub-classes of {@link FileInputFormat}, is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of the input files. However, the {@link FileSystem} blocksize of
+ the input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Clearly, logical splits based on input-size are insufficient for many
+ applications, since record boundaries must be respected. In such cases, the
+ application also has to implement a {@link RecordReader}, on which lies the
+ responsibility to respect record-boundaries and present a record-oriented
+ view of the logical <code>InputSplit</code> to the individual task.
+
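+ <p>For illustration, a minimal sketch of wiring an <code>InputFormat</code>
+ into a job and bounding the split size; the 64MB figure and the hint of 10
+ splits are example values, and <code>MyJob</code> is a placeholder class:</p>
+ <p><blockquote><pre>
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+ // Use the line-oriented default; any InputFormat implementation works here.
+ job.setInputFormat(TextInputFormat.class);
+ // Raise the lower bound on split sizes to 64MB, in bytes.
+ job.setLong("mapred.min.split.size", 64 * 1024 * 1024);
+ // numSplits is only a hint; the InputFormat decides the actual count.
+ InputSplit[] splits = job.getInputFormat().getSplits(job, 10);
+ </pre></blockquote></p>
+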
+ @see InputSplit
+ @see RecordReader
+ @see JobClient
+ @see FileInputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.InputSplit -->
+ <interface name="InputSplit" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the total number of bytes in the data of the <code>InputSplit</code>.
+
+ @return the number of bytes in the input split.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hostnames where the input split is located.
+
+ @return list of hostnames where data of the <code>InputSplit</code> is
+ located as an array of <code>String</code>s.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputSplit</code> represents the data to be processed by an
+ individual {@link Mapper}.
+
+ <p>Typically, it presents a byte-oriented view of the input, and it is the
+ responsibility of the job's {@link RecordReader} to process this and present
+ a record-oriented view.
+
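+ <p>A short illustrative sketch of inspecting the splits computed for a job,
+ assuming <code>job</code> is a fully configured {@link JobConf}:</p>
+ <p><blockquote><pre>
+ InputSplit[] splits = job.getInputFormat().getSplits(job, 4);
+ for (InputSplit split : splits) {
+   // Bytes to process and the hosts holding the data, used for scheduling.
+   System.out.println(split.getLength() + " bytes on "
+       + java.util.Arrays.toString(split.getLocations()));
+ }
+ </pre></blockquote></p>
+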
+ @see InputFormat
+ @see RecordReader]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputSplit -->
+ <!-- start class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <class name="InvalidFileTypeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidFileTypeException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidFileTypeException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when a file's type differs from the desired file type,
+ e.g. getting a file when a directory is expected, or vice versa.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidInputException -->
+ <class name="InvalidInputException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidInputException" type="java.util.List&lt;java.io.IOException&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create the exception with the given list.
+ @param probs the list of problems to report. This list is not copied.]]>
+ </doc>
+ </constructor>
+ <method name="getProblems" return="java.util.List&lt;java.io.IOException&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete list of the problems reported.
+ @return the list of problems, which must not be modified]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get a summary message of the problems found.
+ @return the concatenated messages from all of the problems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class wraps a list of problems with the input, so that the user
+ can get a list of problems together instead of finding and fixing them one
+ by one.
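+
+ <p>A brief sketch of how a client might consume the collected problems;
+ the surrounding job set-up is assumed:</p>
+ <p><blockquote><pre>
+ try {
+   JobClient.runJob(job);
+ } catch (InvalidInputException e) {
+   // Report every input problem at once rather than one per run.
+   for (IOException problem : e.getProblems()) {
+     System.err.println(problem.getMessage());
+   }
+ }
+ </pre></blockquote></p>]]>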
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidInputException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <class name="InvalidJobConfException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidJobConfException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidJobConfException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when the jobconf is missing some mandatory
+ attributes, or when the value of some attributes is invalid.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <!-- start class org.apache.hadoop.mapred.IsolationRunner -->
+ <class name="IsolationRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IsolationRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Run a single task.
+ @param args the first argument is the task directory]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.IsolationRunner -->
+ <!-- start class org.apache.hadoop.mapred.JobClient -->
+ <class name="JobClient" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobClient"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job client.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client with the given {@link JobConf}, and connect to the
+ default {@link JobTracker}.
+
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client, connect to the indicated job tracker.
+
+ @param jobTrackAddr the job tracker to connect to.
+ @param conf configuration.]]>
+ </doc>
+ </constructor>
+ <method name="getCommandLineConfig" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the command-line configuration.]]>
+ </doc>
+ </method>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Connect to the default {@link JobTracker}.
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the <code>JobClient</code>.]]>
+ </doc>
+ </method>
+ <method name="getFs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a filesystem handle. We need this to prepare jobs
+ for submission to the MapReduce system.
+
+ @return the filesystem handle.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobFile" type="java.lang.String"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param jobFile the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param job the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isJobDirValid" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobDirPath" type="org.apache.hadoop.fs.Path"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Checks if the job directory is clean and has all the required components
+ for (re)starting the job.]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a {@link RunningJob} object to track an ongoing job. Returns
+ null if the id does not correspond to any known job.
+
+ @param jobid the jobid of the job.
+ @return the {@link RunningJob} handle to track the job, null if the
+ <code>jobid</code> doesn't correspond to any known job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getJob(JobID)}.">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getJob(JobID)}.]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the map tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the map tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getMapTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getMapTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the reduce tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the reduce tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCleanupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the cleanup tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the cleanup tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getSetupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the setup tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the setup tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getReduceTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getReduceTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the Map-Reduce cluster.
+
+ @return the status information about the Map-Reduce cluster as an object
+ of {@link ClusterStatus}.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are not completed and not failed.
+
+ @return array of {@link JobStatus} for the running/to-be-run jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are submitted.
+
+ @return array of {@link JobStatus} for the submitted jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Utility that submits a job, then polls for progress until the job is
+ complete.
+
+ @param job the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Sets the output filter for tasks. Only those tasks whose output
+ matches the filter are printed.
+ @param newValue task filter.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the task output filter out of the JobConf.
+
+ @param job the JobConf to examine.
+ @return the filter level.]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Modify the JobConf to set the task output filter.
+
+ @param job the JobConf to modify.
+ @param newValue the value to set.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns task output filter.
+ @return task filter.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getDefaultMaps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Maps in the cluster.
+
+ @return the max available Maps in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDefaultReduces" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Reduces in the cluster.
+
+ @return the max available Reduces in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Grab the jobtracker system directory path where job-specific files are to be placed.
+
+ @return the system directory where job-specific files are to be placed.]]>
+ </doc>
+ </method>
+ <method name="getQueues" return="org.apache.hadoop.mapred.JobQueueInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array of queue information objects about all the Job Queues
+ configured.
+
+ @return Array of JobQueueInfo objects
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJobsFromQueue" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets all the jobs that were added to a particular Job Queue
+
+ @param queueName name of the Job Queue
+ @return Array of jobs present in the job queue
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getQueueInfo" return="org.apache.hadoop.mapred.JobQueueInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the queue information associated with a particular Job Queue
+
+ @param queueName name of the job queue.
+ @return Queue information associated with the particular queue.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[<code>JobClient</code> is the primary interface for the user-job to interact
+ with the {@link JobTracker}.
+
+ <code>JobClient</code> provides facilities to submit jobs, track their
+ progress, access component-tasks' reports/logs, get the Map-Reduce cluster
+ status information etc.
+
+ <p>The job submission process involves:
+ <ol>
+ <li>
+ Checking the input and output specifications of the job.
+ </li>
+ <li>
+ Computing the {@link InputSplit}s for the job.
+ </li>
+ <li>
+ Setting up the requisite accounting information for the {@link DistributedCache}
+ of the job, if necessary.
+ </li>
+ <li>
+ Copying the job's jar and configuration to the map-reduce system directory
+ on the distributed file-system.
+ </li>
+ <li>
+ Submitting the job to the <code>JobTracker</code> and optionally monitoring
+ its status.
+ </li>
+ </ol></p>
+
+ Normally the user creates the application, describes various facets of the
+ job via {@link JobConf} and then uses the <code>JobClient</code> to submit
+ the job and monitor its progress.
+
+ <p>Here is an example on how to use <code>JobClient</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ job.setInputPath(new Path("in"));
+ job.setOutputPath(new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ </pre></blockquote></p>
+
+ <h4 id="JobControl">Job Control</h4>
+
+ <p>At times clients would chain map-reduce jobs to accomplish complex tasks
+ which cannot be done via a single map-reduce job. This is fairly easy since
+ the output of the job, typically, goes to the distributed file-system, and that
+ can be used as the input for the next job.</p>
+
+ <p>However, this also means that the onus of ensuring jobs are complete
+ (success/failure) lies squarely on the clients. In such situations the
+ various job-control options are:
+ <ol>
+ <li>
+ {@link #runJob(JobConf)} : submits the job and returns only after
+ the job has completed.
+ </li>
+ <li>
+ {@link #submitJob(JobConf)} : submits the job and returns immediately;
+ poll the returned {@link RunningJob} handle to query status and make
+ scheduling decisions (see the sketch below).
+ </li>
+ <li>
+ {@link JobConf#setJobEndNotificationURI(String)} : sets up a notification
+ on job-completion, thus avoiding polling.
+ </li>
+ </ol></p>
+
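+ <p>A minimal sketch of the polling approach; the 5-second interval is an
+ arbitrary illustrative choice, and exception handling is omitted:</p>
+ <p><blockquote><pre>
+ JobClient jobClient = new JobClient(job);
+ RunningJob running = jobClient.submitJob(job);
+ while (!running.isComplete()) {
+   Thread.sleep(5000); // InterruptedException handling omitted for brevity
+ }
+ if (!running.isSuccessful()) {
+   System.err.println("Job " + running.getID() + " failed");
+ }
+ </pre></blockquote></p>
+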
+ @see JobConf
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient -->
+ <!-- start class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <class name="JobClient.TaskStatusFilter" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobClient.TaskStatusFilter&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <!-- start class org.apache.hadoop.mapred.JobConf -->
+ <class name="JobConf" extends="org.apache.hadoop.conf.Configuration"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new map/reduce configuration where the behavior of reading from the
+ default resources can be turned off.
+ <p/>
+ If the parameter {@code loadDefaults} is false, the new instance
+ will not load resources from the default files.
+
+ @param loadDefaults specifies whether to load from the default files]]>
+ </doc>
+ </constructor>
+ <method name="getJar" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user jar for the map-reduce job.
+
+ @return the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJar"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jar" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user jar for the map-reduce job.
+
+ @param jar the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJarByClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the job's jar file by finding an example class location.
+
+ @param cls the example class.]]>
+ </doc>
+ </method>
+ <method name="getLocalDirs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="subdir" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a local file name. Files are distributed among configured
+ local directories.]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reported username for this job.
+
+ @return the username]]>
+ </doc>
+ </method>
+ <method name="setUser"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the reported username for this job.
+
+ @param user the username for this job.]]>
+ </doc>
+ </method>
+ <method name="setKeepFailedTaskFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the framework should keep the intermediate files for
+ failed tasks.
+
+ @param keep <code>true</code> if framework should keep the intermediate files
+ for failed tasks, <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="getKeepFailedTaskFiles" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should the temporary files for failed tasks be kept?
+
+ @return should the files be kept?]]>
+ </doc>
+ </method>
+ <method name="setKeepTaskFilesPattern"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pattern" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set a regular expression for task names that should be kept.
+ The regular expression ".*_m_000123_0" would keep the files
+ for the first instance of map 123 that ran.
+
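+ <p>For example, using the pattern mentioned above:</p>
+ <p><blockquote><pre>
+ conf.setKeepTaskFilesPattern(".*_m_000123_0");
+ </pre></blockquote></p>
+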
+ @param pattern the java.util.regex.Pattern to match against the
+ task names.]]>
+ </doc>
+ </method>
+ <method name="getKeepTaskFilesPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the regular expression that is matched against the task names
+ to see if we need to keep the files.
+
+ @return the pattern as a string, if it was set, otherwise null.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the default file system.
+
+ @param dir the new current working directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the default file system.
+
+ @return the directory name.]]>
+ </doc>
+ </method>
+ <method name="setNumTasksToExecutePerJvm"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numTasks" type="int"/>
+ <doc>
+ <![CDATA[Sets the number of tasks that a spawned task JVM should run
+ before it exits.
+
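+ <p>For example, to let each spawned JVM run tasks without limit; the value
+ -1 comes from the parameter description below:</p>
+ <p><blockquote><pre>
+ conf.setNumTasksToExecutePerJvm(-1);
+ </pre></blockquote></p>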
+ @param numTasks the number of tasks to execute; defaults to 1;
+ -1 signifies no limit]]>
+ </doc>
+ </method>
+ <method name="getNumTasksToExecutePerJvm" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of tasks that a spawned JVM should execute.]]>
+ </doc>
+ </method>
+ <method name="getInputFormat" return="org.apache.hadoop.mapred.InputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link InputFormat} implementation for the map-reduce job,
+ defaults to {@link TextInputFormat} if not specified explicitly.
+
+ @return the {@link InputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link InputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link InputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="getOutputFormat" return="org.apache.hadoop.mapred.OutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link OutputFormat} implementation for the map-reduce job,
+ defaults to {@link TextOutputFormat} if not specified explicitly.
+
+ @return the {@link OutputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getOutputCommitter" return="org.apache.hadoop.mapred.OutputCommitter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link OutputCommitter} implementation for the map-reduce job,
+ defaults to {@link FileOutputCommitter} if not specified explicitly.
+
+ @return the {@link OutputCommitter} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setOutputCommitter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputCommitter&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link OutputCommitter} implementation for the map-reduce job.
+
+ @param theClass the {@link OutputCommitter} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="setOutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link OutputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link OutputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="setCompressMapOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Should the map outputs be compressed before transfer?
+ Uses the SequenceFile compression.
+
+ @param compress should the map outputs be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressMapOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Are the outputs of the maps to be compressed?
+
+ @return <code>true</code> if the outputs of the maps are to be compressed,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codecClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Set the given class as the {@link CompressionCodec} for the map outputs.
+
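+ <p>For example (any {@link CompressionCodec} implementation may be used;
+ <code>GzipCodec</code> is just one choice):</p>
+ <p><blockquote><pre>
+ conf.setCompressMapOutput(true);
+ conf.setMapOutputCompressorClass(org.apache.hadoop.io.compress.GzipCodec.class);
+ </pre></blockquote></p>
+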
+ @param codecClass the {@link CompressionCodec} class that will compress
+ the map outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputCompressorClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultValue" type="java.lang.Class&lt;? extends org.apache.hadoop.io.compress.CompressionCodec&gt;"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the map outputs.
+
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} class that should be used to compress the
+ map outputs.
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getMapOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the map output data. If it is not set, use the
+ (final) output key class. This allows the map output key class to be
+ different than the final output key class.
+
+ @return the map output key class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the map output data. This allows the user to
+ specify the map output key class to be different than the final output
+ key class.
+
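+ <p>For example, a job whose maps emit <code>Text</code> keys while the final
+ output uses <code>IntWritable</code> keys; the types here are illustrative:</p>
+ <p><blockquote><pre>
+ conf.setMapOutputKeyClass(Text.class);
+ conf.setOutputKeyClass(IntWritable.class);
+ </pre></blockquote></p>
+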
+ @param theClass the map output key class.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for the map output data. If it is not set, use the
+ (final) output value class. This allows the map output value class to be
+ different than the final output value class.
+
+ @return the map output value class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for the map output data. This allows the user to
+ specify the map output value class to be different than the final output
+ value class.
+
+ @param theClass the map output value class.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the job output data.
+
+ @return the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the job output data.
+
+ @param theClass the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link RawComparator} comparator used to compare keys.
+
+ @return the {@link RawComparator} comparator used to compare keys.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyComparatorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link RawComparator} comparator used to compare keys.
+
+ @param theClass the {@link RawComparator} comparator used to
+ compare keys.
+ @see #setOutputValueGroupingComparator(Class)]]>
+ </doc>
+ </method>
+ <method name="setKeyFieldComparatorOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keySpec" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the {@link KeyFieldBasedComparator} options used to compare keys.
+
+ @param keySpec the key specification of the form -k pos1[,pos2], where,
+ pos is of the form f[.c][opts], where f is the number
+ of the key field to use, and c is the number of the first character from
+ the beginning of the field. Fields and character positions are numbered
+ starting with 1; a character position of zero in pos2 indicates the
+ field's last character. If '.c' is omitted from pos1, it defaults to 1
+ (the beginning of the field); if omitted from pos2, it defaults to 0
+ (the end of the field). opts are ordering options. The supported options
+ are:
+ -n, (Sort numerically)
+ -r, (Reverse the result of comparison)
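+
+ <p>For example, to sort numerically and in reverse on the second field;
+ the spec string is illustrative:</p>
+ <p><blockquote><pre>
+ conf.setKeyFieldComparatorOptions("-k2,2nr");
+ </pre></blockquote></p>]]>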
+ </doc>
+ </method>
+ <method name="getKeyFieldComparatorOption" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link KeyFieldBasedComparator} options]]>
+ </doc>
+ </method>
+ <method name="setKeyFieldPartitionerOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keySpec" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the {@link KeyFieldBasedPartitioner} options used for
+ {@link Partitioner}
+
+ @param keySpec the key specification of the form -k pos1[,pos2], where,
+ pos is of the form f[.c][opts], where f is the number
+ of the key field to use, and c is the number of the first character from
+ the beginning of the field. Fields and character positions are numbered
+ starting with 1; a character position of zero in pos2 indicates the
+ field's last character. If '.c' is omitted from pos1, it defaults to 1
+ (the beginning of the field); if omitted from pos2, it defaults to 0
+ (the end of the field).]]>
+ </doc>
+ </method>
+ <method name="getKeyFieldPartitionerOption" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link KeyFieldBasedPartitioner} options]]>
+ </doc>
+ </method>
+ <method name="getOutputValueGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user defined {@link RawComparator} comparator for
+ grouping keys of inputs to the reduce.
+
+ @return comparator set by the user for grouping values.
+ @see #setOutputValueGroupingComparator(Class) for details.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueGroupingComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.io.RawComparator&gt;"/>
+ <doc>
+ <![CDATA[Set the user defined {@link RawComparator} comparator for
+ grouping keys in the input to the reduce.
+
+ <p>This comparator should be provided if the equivalence rules for keys
+ for sorting the intermediates are different from those for grouping keys
+ before each call to
+ {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
+
+ <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
+ in a single call to the reduce function if K1 and K2 compare as equal.</p>
+
+ <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
+ how keys are sorted, this can be used in conjunction to simulate
+ <i>secondary sort on values</i>.</p>
+
+ <p><i>Note</i>: This is not a guarantee of the reduce sort being
+ <i>stable</i> in any sense. (In any case, with the order of available
+ map-outputs to the reduce being non-deterministic, it wouldn't make
+ that much sense.)</p>
+
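+ <p>A sketch of the usual secondary-sort wiring; the two comparator classes
+ named here are hypothetical user-supplied {@link RawComparator}
+ implementations, not library classes:</p>
+ <p><blockquote><pre>
+ // Sort on the composite key: natural key first, then the secondary part.
+ conf.setOutputKeyComparatorClass(CompositeKeyComparator.class);
+ // Group reduce inputs by the natural key only.
+ conf.setOutputValueGroupingComparator(NaturalKeyGroupingComparator.class);
+ </pre></blockquote></p>
+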
+ @param theClass the comparator class to be used for grouping keys.
+ It should implement <code>RawComparator</code>.
+ @see #setOutputKeyComparatorClass(Class)]]>
+ </doc>
+ </method>
+ <method name="getOutputValueClass" return="java.lang.Class&lt;?&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for job outputs.
+
+ @return the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for job outputs.
+
+ @param theClass the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapperClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Mapper} class for the job.
+
+ @return the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapperClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Mapper} class for the job.
+
+ @param theClass the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getMapRunnerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link MapRunnable} class for the job.
+
+ @return the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapRunnerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.MapRunnable&gt;"/>
+ <doc>
+ <![CDATA[Expert: Set the {@link MapRunnable} class for the job.
+
+ Typically used to exert greater control on {@link Mapper}s.
+
+ @param theClass the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getPartitionerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Partitioner} used to partition {@link Mapper}-outputs
+ to be sent to the {@link Reducer}s.
+
+ @return the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setPartitionerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Partitioner&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Partitioner} class used to partition
+ {@link Mapper}-outputs to be sent to the {@link Reducer}s.
+
+ @param theClass the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getReducerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Reducer} class for the job.
+
+ @return the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setReducerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the {@link Reducer} class for the job.
+
+ @param theClass the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getCombinerClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers. Typically the combiner is the same as
+ the {@link Reducer} for the job i.e. {@link #getReducerClass()}.
+
+ @return the user-defined combiner class used to combine map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCombinerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&gt;"/>
+ <doc>
+ <![CDATA[Set the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers.
+
+ <p>The combiner is a task-level aggregation operation which, in some cases,
+ helps to cut down the amount of data transferred from the {@link Mapper} to
+ the {@link Reducer}, leading to better performance.</p>
+
+ <p>Typically the combiner is the same as the <code>Reducer</code> for the
+ job i.e. {@link #setReducerClass(Class)}.</p>
+
+ @param theClass the user-defined combiner class used to combine
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution is to be used for this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on, else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getMapSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for map tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution is to be
+ used for map tasks in this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for map tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for map tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getReduceSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for reduce tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution is to be used
+ for reduce tasks in this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setReduceSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for reduce tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for reduce tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getNumMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of map tasks for this job.
+ Defaults to <code>1</code>.
+
+ @return the number of map tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumMapTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the number of map tasks for this job.
+
+ <p><i>Note</i>: This is only a <i>hint</i> to the framework. The actual
+ number of spawned map tasks depends on the number of {@link InputSplit}s
+ generated by the job's {@link InputFormat#getSplits(JobConf, int)}.
+
+ A custom {@link InputFormat} is typically used to accurately control
+ the number of map tasks for the job.</p>
+
+ <h4 id="NoOfMaps">How many maps?</h4>
+
+ <p>The number of maps is usually driven by the total size of the inputs
+ i.e. total number of blocks of the input files.</p>
+
+ <p>The right level of parallelism for maps seems to be around 10-100 maps
+ per node, although it has been set as high as 300 or so for very CPU-light
+ map tasks. Task setup takes a while, so it is best if the maps take at
+ least a minute to execute.</p>
+
+ <p>The default behavior of file-based {@link InputFormat}s is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of input files. However, the {@link FileSystem} blocksize of the
+ input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB,
+ you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is
+ used to set it even higher.</p>
+
+ @param n the number of map tasks for this job.
+ @see InputFormat#getSplits(JobConf, int)
+ @see FileInputFormat
+ @see FileSystem#getDefaultBlockSize()
+ @see FileStatus#getBlockSize()]]>
+ </doc>
+ </method>
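+ <!-- Illustrative sketch (not part of the API dump): the two knobs the
+      paragraph above describes. Raising mapred.min.split.size coarsens the
+      splits; setNumMapTasks(int) is only a hint. MyJob and the 256 MB value
+      are hypothetical examples.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.mapred.JobConf;
+
+      JobConf job = new JobConf(new Configuration(), MyJob.class);
+      // Lower bound on split size: each map now gets at least 256 MB.
+      job.setLong("mapred.min.split.size", 256L * 1024 * 1024);
+      // Or hint at a map count directly; the InputFormat's splits still win
+      // if they produce more than this.
+      job.setNumMapTasks(500);
+ -->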
+ <method name="getNumReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of reduce tasks for this job. Defaults to
+ <code>1</code>.
+
+ @return the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumReduceTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the requisite number of reduce tasks for this job.
+
+ <h4 id="NoOfReduces">How many reduces?</h4>
+
+ <p>The right number of reduces seems to be <code>0.95</code> or
+ <code>1.75</code> multiplied by (&lt;<i>no. of nodes</i>&gt; *
+ <a href="{@docRoot}/../hadoop-default.html#mapred.tasktracker.reduce.tasks.maximum">
+ mapred.tasktracker.reduce.tasks.maximum</a>).
+ </p>
+
+ <p>With <code>0.95</code> all of the reduces can launch immediately and
+ start transferring map outputs as the maps finish. With <code>1.75</code>
+ the faster nodes will finish their first round of reduces and launch a
+ second wave of reduces, doing a much better job of load balancing.</p>
+
+ <p>Increasing the number of reduces increases the framework overhead, but
+ improves load balancing and lowers the cost of failures.</p>
+
+ <p>The scaling factors above are slightly less than whole numbers to
+ reserve a few reduce slots in the framework for speculative-tasks, failures
+ etc.</p>
+
+ <h4 id="ReducerNone">Reducer NONE</h4>
+
+ <p>It is legal to set the number of reduce-tasks to <code>zero</code>.</p>
+
+ <p>In this case the output of the map-tasks goes directly to the
+ distributed file-system, to the path set by
+ {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the
+ framework doesn't sort the map-outputs before writing them out to HDFS.</p>
+
+ @param n the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
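+ <!-- Illustrative sketch (not part of the API dump) of the 0.95 heuristic
+      above, assuming ClusterStatus#getMaxReduceTasks() reports the
+      cluster-wide reduce-slot capacity (nodes * per-node maximum). The
+      JobClient constructor and getClusterStatus() throw IOException, so the
+      enclosing method must declare it.
+
+      import org.apache.hadoop.mapred.ClusterStatus;
+      import org.apache.hadoop.mapred.JobClient;
+
+      ClusterStatus cluster = new JobClient(job).getClusterStatus();
+      // 0.95: all reduces launch in one wave; use 1.75 for two waves.
+      job.setNumReduceTasks((int) (0.95 * cluster.getMaxReduceTasks()));
+ -->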
+ <method name="getMaxMapAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ map task, as specified by the <code>mapred.map.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ map task.
+
+ @param n the number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ reduce task, as specified by the <code>mapred.reduce.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ reduce task.
+
+ @param n the number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name. This is only used to identify the
+ job to the user.
+
+ @return the job's name, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified job name.
+
+ @param name the job's new name.]]>
+ </doc>
+ </method>
+ <method name="getSessionId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified session identifier. The default is the empty string.
+
+ The session identifier is used to tag metric data that is reported to some
+ performance metrics system via the org.apache.hadoop.metrics API. The
+ session identifier is intended, in particular, for use by Hadoop-On-Demand
+ (HOD) which allocates a virtual Hadoop cluster dynamically and transiently.
+ HOD will set the session identifier by modifying the hadoop-site.xml file
+ before starting the cluster.
+
+ When not running under HOD, this identifier is expected to remain set to
+ the empty string.
+
+ @return the session identifier, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setSessionId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sessionId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified session identifier.
+
+ @param sessionId the new session id.]]>
+ </doc>
+ </method>
+ <method name="setMaxTaskFailuresPerTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="noFailures" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds <code>noFailures</code>, the
+ tasktracker is <i>blacklisted</i> for this job.
+
+ @param noFailures maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxTaskFailuresPerTracker" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Expert: Get the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds this, the tasktracker is
+ <i>blacklisted</i> for this job.
+
+ @return the maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of map tasks that can fail without
+ the job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed map-task results in
+ the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the maximum percentage of map tasks that can fail without the
+ job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts} attempts
+ before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of reduce tasks that can fail without
+ the job being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed reduce-task results
+ in the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum percentage of reduce tasks that can fail without the job
+ being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
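+ <!-- Illustrative sketch (not part of the API dump): a failure-tolerance
+      policy combining the knobs above. The numbers are arbitrary examples.
+
+      job.setMaxMapAttempts(2);             // retry each failing map once
+      job.setMaxReduceAttempts(4);          // keep the default for reduces
+      job.setMaxMapTaskFailuresPercent(5);  // tolerate up to 5% failed maps
+      job.setMaxTaskFailuresPerTracker(3);  // blacklist a tracker after 3 failures
+ -->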
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="prio" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Set {@link JobPriority} for this job.
+
+ @param prio the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link JobPriority} for this job.
+
+ @return the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getProfileEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get whether the task profiling is enabled.
+ @return true if some tasks will be profiled]]>
+ </doc>
+ </method>
+ <method name="setProfileEnabled"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the system should collect profiler information for some of
+ the tasks in this job. The information is stored in the user log
+ directory.
+ @param newValue true means it should be gathered]]>
+ </doc>
+ </method>
+ <method name="getProfileParams" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the profiler configuration arguments.
+
+ The default value for this property is
+ "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
+
+ @return the parameters to pass to the task child to configure profiling]]>
+ </doc>
+ </method>
+ <method name="setProfileParams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the profiler configuration arguments. If the string contains a '%s' it
+ will be replaced with the name of the profiling output file when the task
+ runs.
+
+ This value is passed to the task child JVM on the command line.
+
+ @param value the configuration string]]>
+ </doc>
+ </method>
+ <method name="getProfileTaskRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <doc>
+ <![CDATA[Get the range of maps or reduces to profile.
+ @param isMap is the task a map?
+ @return the task ranges]]>
+ </doc>
+ </method>
+ <method name="setProfileTaskRange"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <param name="newValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the ranges of maps or reduces to profile. setProfileEnabled(true)
+ must also be called.
+ @param isMap whether the ranges apply to map tasks (else reduce tasks)
+ @param newValue a set of integer ranges of the task ids]]>
+ </doc>
+ </method>
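+ <!-- Illustrative sketch (not part of the API dump): profiling the first
+      two maps and the first reduce with the default hprof agent string
+      quoted above; '%s' is replaced with the profiling output file name.
+
+      job.setProfileEnabled(true);
+      job.setProfileTaskRange(true, "0-1");   // maps 0 and 1
+      job.setProfileTaskRange(false, "0");    // reduce 0
+      job.setProfileParams(
+          "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s");
+ -->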
+ <method name="setMapDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the map tasks fail.
+
+ <p>The debug script can aid debugging of failed map tasks. The script is
+ given the task's stdout, stderr, syslog and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the map failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script needs to be symlinked. </p>
+
+ <p>Here is an example of how to submit a script:</p>
+ <p><blockquote><pre>
+ job.setMapDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param mDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getMapDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the map task's debug script.
+
+ @return the debug Script for the mapred job for failed map tasks.
+ @see #setMapDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="setReduceDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the reduce tasks fail.
+
+ <p>The debug script can aid debugging of failed reduce tasks. The script
+ is given the task's stdout, stderr, syslog and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the reduce failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script file needs to be symlinked. </p>
+
+ <p>Here is an example of how to submit a script:</p>
+ <p><blockquote><pre>
+ job.setReduceDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param rDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getReduceDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reduce task's debug script.
+
+ @return the debug script for the mapred job for failed reduce tasks.
+ @see #setReduceDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="getJobEndNotificationURI" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ @return the job end notification uri, <code>null</code> if it hasn't
+ been set.
+ @see #setJobEndNotificationURI(String)]]>
+ </doc>
+ </method>
+ <method name="setJobEndNotificationURI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ <p>The uri can contain 2 special parameters: <tt>$jobId</tt> and
+ <tt>$jobStatus</tt>. Those, if present, are replaced by the job's
+ identifier and completion-status respectively.</p>
+
+ <p>This is typically used by application-writers to implement chaining of
+ Map-Reduce jobs in an <i>asynchronous manner</i>.</p>
+
+ @param uri the job end notification uri
+ @see JobStatus
+ @see <a href="{@docRoot}/org/apache/hadoop/mapred/JobClient.html#JobCompletionAndChaining">Job Completion and Chaining</a>]]>
+ </doc>
+ </method>
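+ <!-- Illustrative sketch (not part of the API dump): a notification URI
+      using both special parameters. The host and path are hypothetical;
+      the framework substitutes $jobId and $jobStatus before the call.
+
+      job.setJobEndNotificationURI(
+          "http://example.com/jobdone?id=$jobId&status=$jobStatus");
+ -->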
+ <method name="getJobLocalDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job-specific shared directory for use as scratch space.
+
+ <p>
+ When a job starts, a shared directory is created at
+ <code>${mapred.local.dir}/taskTracker/jobcache/$jobid/work/</code>.
+ This directory is exposed to the users through
+ <code>job.local.dir</code>, so the tasks can use this space
+ as scratch space and share files among them.</p>
+ This value is also available as a system property.
+
+ @return The localized job specific shared directory]]>
+ </doc>
+ </method>
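+ <!-- Illustrative sketch (not part of the API dump): reading the shared
+      scratch directory from inside a running task, e.g. in
+      Mapper.configure(JobConf). The file name is hypothetical.
+
+      public void configure(JobConf job) {
+        // The same value is exposed as a system property, per the doc above.
+        String scratch = job.get("job.local.dir");
+        File shared = new File(scratch, "lookup.dat"); // java.io.File
+      }
+ -->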
+ <method name="getQueueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the name of the queue to which this job is submitted.
+ Defaults to 'default'.
+
+ @return name of the queue]]>
+ </doc>
+ </method>
+ <method name="setQueueName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the name of the queue to which this job should be submitted.
+
+ @param queueName Name of the queue]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_QUEUE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Name of the queue to which jobs will be submitted, if no queue
+ name is mentioned.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A map/reduce job configuration.
+
+ <p><code>JobConf</code> is the primary interface for a user to describe a
+ map-reduce job to the Hadoop framework for execution. The framework tries to
+ faithfully execute the job as-is described by <code>JobConf</code>, however:
+ <ol>
+ <li>
+ Some configuration parameters might have been marked as
+ <a href="{@docRoot}/org/apache/hadoop/conf/Configuration.html#FinalParams">
+ final</a> by administrators and hence cannot be altered.
+ </li>
+ <li>
+ While some job parameters are straight-forward to set
+ (e.g. {@link #setNumReduceTasks(int)}), some parameters interact subtly
+ with the rest of the framework and/or job-configuration and are relatively
+ more complex for the user to control finely (e.g. {@link #setNumMapTasks(int)}).
+ </li>
+ </ol></p>
+
+ <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner
+ (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and
+ {@link OutputFormat} implementations to be used, etc.</p>
+
+ <p>Optionally <code>JobConf</code> is used to specify other advanced facets
+ of the job such as the <code>Comparator</code>s to be used, files to be put in
+ the {@link DistributedCache}, whether or not intermediate and/or job outputs
+ are to be compressed (and how), and debuggability via user-provided scripts
+ ({@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)})
+ for post-processing task logs and the task's stdout, stderr and syslog.</p>
+
+ <p>Here is an example on how to configure a job via <code>JobConf</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ FileInputFormat.setInputPaths(job, new Path("in"));
+ FileOutputFormat.setOutputPath(job, new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setCombinerClass(MyJob.MyReducer.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ job.setInputFormat(SequenceFileInputFormat.class);
+ job.setOutputFormat(SequenceFileOutputFormat.class);
+ </pre></blockquote></p>
+
+ @see JobClient
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobConf -->
+ <!-- start interface org.apache.hadoop.mapred.JobConfigurable -->
+ <interface name="JobConfigurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Initializes a new instance from a {@link JobConf}.
+
+ @param job the configuration]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link JobConf}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobConfigurable -->
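+ <!-- Illustrative sketch (not part of the API dump): a class that pulls
+      its settings out of the JobConf via JobConfigurable. The class name
+      and the "myjob.threshold" key are hypothetical.
+
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.JobConfigurable;
+
+      public class ThresholdFilter implements JobConfigurable {
+        private int threshold;
+
+        // Called once with the job's configuration before the object is used.
+        public void configure(JobConf job) {
+          threshold = job.getInt("myjob.threshold", 10);
+        }
+
+        public boolean accept(int value) {
+          return value >= threshold;
+        }
+      }
+ -->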
+ <!-- start class org.apache.hadoop.mapred.JobContext -->
+ <class name="JobContext" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job Configuration
+
+ @return JobConf]]>
+ </doc>
+ </method>
+ <method name="getProgressible" return="org.apache.hadoop.util.Progressable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the progress mechanism for reporting progress.
+
+ @return progress mechanism]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobContext -->
+ <!-- start class org.apache.hadoop.mapred.JobEndNotifier -->
+ <class name="JobEndNotifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobEndNotifier"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="startNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="stopNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="registerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ <method name="localRunnerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobEndNotifier -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory -->
+ <class name="JobHistory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="jobTrackerStartTime" type="long"/>
+ <doc>
+ <![CDATA[Initialize JobHistory files.
+ @param conf JobConf of the job tracker.
+ @param hostname jobtracker's hostname
+ @param jobTrackerStartTime jobtracker's start time
+ @return true if initialized properly,
+ false otherwise]]>
+ </doc>
+ </method>
+ <method name="parseHistoryFromFS"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="l" type="org.apache.hadoop.mapred.JobHistory.Listener"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parses a history file and invokes Listener.handle() for
+ each line of history. It can be used for looking through history
+ files for specific items without having to keep the whole history in memory.
+ @param path path to history file
+ @param l Listener for history events
+ @param fs FileSystem where history file is present
+ @throws IOException]]>
+ </doc>
+ </method>
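+ <!-- Illustrative sketch (not part of the API dump): streaming a history
+      file through a JobHistory.Listener (the callback interface defined
+      later in this file) to count successful jobs, assuming the Job record
+      carries a JOB_STATUS key with the value "SUCCESS". The history path is
+      taken from the command line rather than invented here.
+
+      import java.io.IOException;
+      import java.util.Map;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.JobHistory;
+
+      public class FinishedJobCounter implements JobHistory.Listener {
+        int finished;
+
+        public void handle(JobHistory.RecordTypes recType,
+                           Map<JobHistory.Keys, String> values)
+            throws IOException {
+          if (recType == JobHistory.RecordTypes.Job
+              && "SUCCESS".equals(values.get(JobHistory.Keys.JOB_STATUS))) {
+            finished++;
+          }
+        }
+
+        public static void main(String[] args) throws IOException {
+          JobConf conf = new JobConf();
+          FinishedJobCounter counter = new FinishedJobCounter();
+          JobHistory.parseHistoryFromFS(args[0], counter, FileSystem.get(conf));
+          System.out.println(counter.finished + " finished job(s)");
+        }
+      }
+ -->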
+ <method name="isDisableHistory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the history disable status. By default history is enabled, so this
+ method returns false.
+ @return true if history logging is disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="setDisableHistory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="disableHistory" type="boolean"/>
+ <doc>
+ <![CDATA[Enable/disable history logging. Default value is false, so history
+ is enabled by default.
+ @param disableHistory true if history should be disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getTaskLogsUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attempt" type="org.apache.hadoop.mapred.JobHistory.TaskAttempt"/>
+ <doc>
+ <![CDATA[Return the TaskLogsUrl of a particular TaskAttempt.
+
+ @param attempt the task attempt
+ @return the taskLogsUrl, or null if the http-port, tracker-name or
+ task-attempt-id is unavailable.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JOB_NAME_TRIM_LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provides methods for writing to and reading from job history.
+ Job history works in append mode; JobHistory and its inner classes provide methods
+ to log job events.
+
+ Job history is split into multiple files; the format of each file is plain text where each line
+ is of the format [type (key=value)*], where type identifies the type of the record.
+ Type maps to the UID of one of the inner classes of this class.
+
+ Job history is maintained in a master index which contains the start/stop times of all jobs and
+ a few other job-level properties. Apart from this, each job's history is maintained in a separate history
+ file. The name of job history files follows the format jobtrackerId_jobid.
+
+ For parsing the job history, a listener-based interface is supported, where each line is parsed
+ and passed to the listener. The listener can create an object model of the history or look for specific
+ events and discard the rest of the history.
+
+ CHANGE LOG :
+ Version 0 : The history has the following format :
+ TAG KEY1="VALUE1" KEY2="VALUE2" and so on.
+ TAG can be Job, Task, MapAttempt or ReduceAttempt.
+ Note that a '"' is the line delimiter.
+ Version 1 : Changes the line delimiter to '.'
+ Values are now escaped for unambiguous parsing.
+ Added the Meta tag to store version info.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <class name="JobHistory.HistoryCleaner" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobHistory.HistoryCleaner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Cleans up history data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Delete history files older than one month. Update the master index and remove all
+ jobs older than one month. Also, if a job tracker has had no jobs in the last month,
+ remove the reference to that job tracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <class name="JobHistory.JobInfo" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.JobInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new JobInfo.]]>
+ </doc>
+ </constructor>
+ <method name="getAllTasks" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.Task&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all map and reduce tasks, keyed by task id.]]>
+ </doc>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Get the path of the locally stored job file
+ @param jobId id of the job
+ @return the path of the job file on the local file system]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFile" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the path of the job-history
+ log file.
+
+ @param logFile path of the job-history file
+ @return URL encoded path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL encoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="decodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to decode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL decoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the user name from the job conf]]>
+ </doc>
+ </method>
+ <method name="getJobHistoryLogLocation" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the job history file path given the history filename]]>
+ </doc>
+ </method>
+ <method name="getJobHistoryLogLocationForUser" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the user job history file path]]>
+ </doc>
+ </method>
+ <method name="getJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="id" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recover the job history filename from the history folder.
+ Uses the following pattern
+ $jt-hostname_[0-9]*_$job-id_$user-$job-name*
+ @param jobConf the job conf
+ @param id job id
+ @return the recovered job history filename]]>
+ </doc>
+ </method>
+ <method name="recoverJobHistoryFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="logFilePath" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Selects one of the two files generated as a part of recovery.
+ The rule of thumb is to always select the oldest file.
+ This call makes sure that only one file is left in the end.
+ @param conf job conf
+ @param logFilePath Path of the log file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logSubmitted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="jobConfPath" type="java.lang.String"/>
+ <param name="submitTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Log job submitted event to history. Creates a new file in history
+ for the job. If history file creation fails, it disables history
+ for all subsequent events.
+ @param jobId job id assigned by job tracker.
+ @param jobConf job conf of the job
+ @param jobConfPath path to job conf xml file in HDFS.
+ @param submitTime time when job tracker received the job
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logInited"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs launch time of job.
+
+ @param jobId job id, assigned by jobtracker.
+ @param startTime start time of job.
+ @param totalMaps total maps assigned by jobtracker.
+ @param totalReduces total reduces.]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link #logInited(JobID, long, int, int)} and
+ {@link #logStarted(JobID)}">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs the job as RUNNING.
+
+ @param jobId job id, assigned by jobtracker.
+ @param startTime start time of job.
+ @param totalMaps total maps assigned by jobtracker.
+ @param totalReduces total reduces.
+ @deprecated Use {@link #logInited(JobID, long, int, int)} and
+ {@link #logStarted(JobID)}]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Logs job as running
+ @param jobId job id, assigned by jobtracker.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="finishTime" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <param name="failedMaps" type="int"/>
+ <param name="failedReduces" type="int"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log job finished. Closes the job file in history.
+ @param jobId job id, assigned by jobtracker.
+ @param finishTime finish time of job in ms.
+ @param finishedMaps no. of maps successfully finished.
+ @param finishedReduces no. of reduces successfully finished.
+ @param failedMaps no of failed map tasks.
+ @param failedReduces no of failed reduce tasks.
+ @param counters the counters from the job]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs job failed event. Closes the job history log file.
+ @param jobid job id
+ @param timestamp time when job failure was detected in ms.
+ @param finishedMaps no. of finished map tasks.
+ @param finishedReduces no. of finished reduce tasks.]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs job killed event. Closes the job history log file.
+
+ @param jobid job id
+ @param timestamp time when the kill was issued, in ms.
+ @param finishedMaps no. of finished map tasks.
+ @param finishedReduces no. of finished reduce tasks.]]>
+ </doc>
+ </method>
+ <method name="logJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="priority" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Log job's priority.
+ @param jobid job id
+ @param priority the job's priority]]>
+ </doc>
+ </method>
+ <method name="logJobInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="submitTime" type="long"/>
+ <param name="launchTime" type="long"/>
+ <param name="restartCount" type="int"/>
+ <doc>
+ <![CDATA[Log job's submit-time/launch-time
+ @param jobid job id
+ @param submitTime job's submit time
+ @param launchTime job's launch time
+ @param restartCount number of times the job got restarted]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to job start, finish or failure.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <class name="JobHistory.Keys" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Keys&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Keys[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Keys"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Job history files contain key="value" pairs, where keys belong to this enum.
+ It acts as a global namespace for all keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <!-- start interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <interface name="JobHistory.Listener" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="handle"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recType" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"/>
+ <param name="values" type="java.util.Map&lt;org.apache.hadoop.mapred.JobHistory.Keys, java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Callback method for history parser.
+ @param recType type of record, which is the first entry in the line.
+ @param values a map of key-value pairs as they appear in history.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Callback interface for reading back log events from JobHistory. This interface
+ should be implemented and passed to JobHistory.parseHistoryFromFS()]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
+ <class name="JobHistory.MapAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.MapAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of this map task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time of task attempt as reported by task tracker.
+ @param hostName host name of the task attempt.
+ @deprecated Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="trackerName" type="java.lang.String"/>
+ <param name="httpPort" type="int"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of this map task attempt.
+
+ @param taskAttemptId task attempt id
+ @param startTime start time of task attempt as reported by task tracker.
+ @param trackerName name of the tracker executing the task attempt.
+ @param httpPort http port of the task tracker executing the task attempt
+ @param taskType Whether the attempt is cleanup or setup or map]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFinished(TaskAttemptID, long, String, String, String, Counters)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finish time of map task attempt.
+ @param taskAttemptId task attempt id
+ @param finishTime finish time
+ @param hostName host name
+ @deprecated Use
+ {@link #logFinished(TaskAttemptID, long, String, String, String, Counters)}]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="stateString" type="java.lang.String"/>
+ <param name="counter" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finish time of map task attempt.
+
+ @param taskAttemptId task attempt id
+ @param finishTime finish time
+ @param hostName host name
+ @param taskType Whether the attempt is cleanup or setup or map
+ @param stateString state string of the task attempt
+ @param counter counters of the task attempt]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt failed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @deprecated Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt failed event.
+
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @param taskType Whether the attempt is cleanup or setup or map]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt killed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @deprecated Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt killed event.
+
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @param taskType Whether the attempt is cleanup or setup or map]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+ a Map Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <class name="JobHistory.RecordTypes" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.RecordTypes&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.RecordTypes[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Record types are identifiers for each line of log in history files.
+ A record type appears as the first token in a single line of log.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
+ <class name="JobHistory.ReduceAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.ReduceAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of Reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time
+ @param hostName host name
+ @deprecated Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="trackerName" type="java.lang.String"/>
+ <param name="httpPort" type="int"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of Reduce task attempt.
+
+ @param taskAttemptId task attempt id
+ @param startTime start time
+ @param trackerName tracker name
+ @param httpPort the http port of the tracker executing the task attempt
+ @param taskType Whether the attempt is cleanup or setup or reduce]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFinished(TaskAttemptID, long, long, long, String, String, String, Counters)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finished event of this task.
+ @param taskAttemptId task attempt id
+ @param shuffleFinished shuffle finish time
+ @param sortFinished sort finish time
+ @param finishTime finish time of task
+ @param hostName host name where task attempt executed
+ @deprecated Use
+ {@link #logFinished(TaskAttemptID, long, long, long, String, String, String, Counters)}]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="stateString" type="java.lang.String"/>
+ <param name="counter" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finished event of this task.
+
+ @param taskAttemptId task attempt id
+ @param shuffleFinished shuffle finish time
+ @param sortFinished sort finish time
+ @param finishTime finish time of task
+ @param hostName host name where task attempt executed
+ @param taskType Whether the attempt is cleanup or setup or reduce
+ @param stateString the state string of the attempt
+ @param counter counters of the attempt]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log failed reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task failed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @deprecated Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log failed reduce task attempt.
+
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task failed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @param taskType Whether the attempt is cleanup or setup or reduce]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log killed reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task was killed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @deprecated Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log killed reduce task attempt.
+
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task was killed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @param taskType Whether the attempt is cleanup or setup or reduce]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to the start, finish or failure of
+ a Reduce Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
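+ <!-- Illustrative Java sketch (not part of the generated record): how the static
+      ReduceAttempt logging entries above might be called. The id string, host names
+      and state string are made-up assumptions; only the signatures come from this file.
+
+      TaskAttemptID attempt = TaskAttemptID.forName("attempt_200707121733_0003_r_000001_0"); // forName assumed
+      long now = System.currentTimeMillis();
+      JobHistory.ReduceAttempt.logFinished(attempt, now, now, now,
+          "tracker-host", "REDUCE", "reduce > reduce", counters); // counters: a Counters instance, assumed in scope
+      JobHistory.ReduceAttempt.logKilled(attempt, now, "tracker-host", "killed by user", "REDUCE");
+ -->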
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Task -->
+ <class name="JobHistory.Task" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.Task"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="splitLocations" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of task (TIP).
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param startTime start time of the tip
+ @param splitLocations split locations of the task]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finish time of task.
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param finishTime finish time of task in ms
+ @param counters counters of the task]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task failed event.
+ @param taskId task id
+ @param taskType MAP or REDUCE.
+ @param time timestamp when the failure was detected.
+ @param error error message for failure.]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="failedDueToAttempt" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[@param failedDueToAttempt The attempt that caused the failure, if any]]>
+ </doc>
+ </method>
+ <method name="getTaskAttempts" return="java.util.Map&lt;java.lang.String, org.apache.hadoop.mapred.JobHistory.TaskAttempt&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all task attempts for this task, keyed as <task attempt id, TaskAttempt>.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to a task's start, finish or failure.
+ All events logged by this class are logged in a separate file per job in
+ the job tracker history. These events map to TIPs in the jobtracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Task -->
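+ <!-- Illustrative Java sketch (an assumption, not generated content): logging a
+      TIP lifecycle with the Task entries above. The TaskID string and split
+      locations are made up; signatures are taken from this record.
+
+      TaskID tip = TaskID.forName("task_200707121733_0003_m_000001"); // forName assumed
+      JobHistory.Task.logStarted(tip, "MAP", System.currentTimeMillis(), "/rack1/host1");
+      JobHistory.Task.logFinished(tip, "MAP", System.currentTimeMillis(), counters); // counters assumed in scope
+ -->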
+ <!-- start class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <class name="JobHistory.TaskAttempt" extends="org.apache.hadoop.mapred.JobHistory.Task"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.TaskAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Base class for Map and Reduce TaskAttempts.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Values -->
+ <class name="JobHistory.Values" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobHistory.Values&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Values[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Values"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[This enum contains some of the values commonly used by history log events.
+ Since values in history can only be strings, Values.name() is used in
+ most places in the history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Values -->
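+ <!-- Illustrative sketch: as the doc above notes, history values are written as
+      strings via Values.name(). The enum constants are not listed in this record,
+      so the loop below uses values() generically:
+
+      for (JobHistory.Values v : JobHistory.Values.values()) {
+          System.out.println(v.name()); // the string form stored in the history file
+      }
+ -->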
+ <!-- start class org.apache.hadoop.mapred.JobID -->
+ <class name="JobID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobID" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a JobID object
+ @param jtIdentifier jobTracker identifier
+ @param id job number]]>
+ </doc>
+ </constructor>
+ <method name="getJtIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare JobIds by first jtIdentifiers, then by job numbers]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a JobID object from the given string
+ @return constructed JobID object, or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getJobIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches job IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>any job</i>
+ run on the jobtracker started at <i>200707121733</i>, we would use:
+ <pre>
+ JobID.getJobIDsPattern("200707121733", null);
+ </pre>
+ which will return:
+ <pre> "job_200707121733_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @return a regex pattern matching JobIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[JobID represents the immutable and unique identifier for
+ the job. A JobID consists of two parts. The first part
+ represents the jobtracker identifier, so that the jobID-to-jobtracker
+ mapping is defined. In a cluster setup this string is the jobtracker
+ start time; in the local setting it is "local".
+ The second part of the JobID is the job number. <br>
+ An example JobID is <code>job_200707121733_0003</code>, which represents
+ the third job running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse JobID strings, but rather
+ use appropriate constructors or {@link #forName(String)} method.
+
+ @see TaskID
+ @see TaskAttemptID
+ @see JobTracker#getNewJobId()
+ @see JobTracker#getStartTime()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobID -->
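+ <!-- Illustrative Java sketch: parsing and pattern matching with the JobID entries
+      above; the literal id string is an assumption echoing the class doc.
+
+      JobID id = JobID.forName("job_200707121733_0003"); // throws IllegalArgumentException if malformed
+      String jt = id.getJtIdentifier();                  // "200707121733"
+      String regex = JobID.getJobIDsPattern("200707121733", null); // matches any job of that jobtracker
+ -->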
+ <!-- start class org.apache.hadoop.mapred.JobPriority -->
+ <class name="JobPriority" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobPriority&gt;"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobPriority[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Used to describe the priority of the running job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobPriority -->
+ <!-- start class org.apache.hadoop.mapred.JobProfile -->
+ <class name="JobProfile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobProfile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an empty {@link JobProfile}.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, org.apache.hadoop.mapred.JobID, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a {@link JobProfile} from the userid, jobid,
+ job config-file, job-details url and job name.
+
+ @param user userid of the person who submitted the job.
+ @param jobid id of the job.
+ @param jobFile job configuration file.
+ @param url link to the web-ui for details of the job.
+ @param name user-specified job name.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, org.apache.hadoop.mapred.JobID, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a {@link JobProfile} from the userid, jobid,
+ job config-file, job-details url and job name.
+
+ @param user userid of the person who submitted the job.
+ @param jobid id of the job.
+ @param jobFile job configuration file.
+ @param url link to the web-ui for details of the job.
+ @param name user-specified job name.
+ @param queueName name of the queue to which the job is submitted]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="use JobProfile(String, JobID, String, String, String) instead">
+ <doc>
+ <![CDATA[@deprecated use JobProfile(String, JobID, String, String, String) instead]]>
+ </doc>
+ </constructor>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user id.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job id.]]>
+ </doc>
+ </method>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID() instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID() instead]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configuration file for the job.]]>
+ </doc>
+ </method>
+ <method name="getURL" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the link to the web-ui for details of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name.]]>
+ </doc>
+ </method>
+ <method name="getQueueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the queue to which the job is submitted.
+ @return name of the queue.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A JobProfile is a MapReduce primitive that tracks a job,
+ whether living or dead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobProfile -->
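+ <!-- Illustrative Java sketch: constructing a JobProfile with the five-argument
+      constructor documented above; all argument values are made up.
+
+      JobProfile profile = new JobProfile("alice", JobID.forName("job_200707121733_0003"),
+          "/jobs/job.xml", "http://jt:50030/jobdetails.jsp", "word count");
+      String user = profile.getUser();       // "alice"
+      String queue = profile.getQueueName(); // queue the job was submitted to
+ -->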
+ <!-- start class org.apache.hadoop.mapred.JobQueueInfo -->
+ <class name="JobQueueInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobQueueInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor for Job Queue Info.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobQueueInfo" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a new JobQueueInfo object using the queue name and the
+ scheduling information passed.
+
+ @param queueName Name of the job queue
+ @param schedulingInfo Scheduling Information associated with the job
+ queue]]>
+ </doc>
+ </constructor>
+ <method name="setQueueName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the queue name of the JobQueueInfo
+
+ @param queueName Name of the job queue.]]>
+ </doc>
+ </method>
+ <method name="getQueueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the queue name from JobQueueInfo
+
+ @return queue name]]>
+ </doc>
+ </method>
+ <method name="setSchedulingInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="schedulingInfo" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the scheduling information associated with a particular job queue
+
+ @param schedulingInfo the scheduling information for the queue]]>
+ </doc>
+ </method>
+ <method name="getSchedulingInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the scheduling information associated with a particular job queue.
+ If nothing is set, returns <b>"N/A"</b>.
+
+ @return scheduling information associated with the particular job queue]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Class that contains the information regarding the Job Queues which are
+ maintained by the Hadoop Map/Reduce framework.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobQueueInfo -->
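+ <!-- Illustrative Java sketch: the two-argument constructor and accessors above;
+      the queue name and scheduling strings are assumptions.
+
+      JobQueueInfo q = new JobQueueInfo("research", "capacity: 30%");
+      q.setSchedulingInfo("capacity: 40%");
+      String info = q.getSchedulingInfo(); // "N/A" if nothing was ever set
+ -->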
+ <!-- start class org.apache.hadoop.mapred.JobShell -->
+ <class name="JobShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run method from Tool]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Provides command line parsing for job submission.
+ A job submission looks like:
+ hadoop jar -libjars <comma separated jars> -archives <comma separated archives>
+ -files <comma separated files> inputjar args]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobShell -->
+ <!-- start class org.apache.hadoop.mapred.JobStatus -->
+ <class name="JobStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="JobStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on cleanup
+ @param runState The current state of the job]]>
+ </doc>
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job]]>
+ </doc>
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, int, org.apache.hadoop.mapred.JobPriority"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, float, int, org.apache.hadoop.mapred.JobPriority"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.]]>
+ </doc>
+ </constructor>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID instead]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The jobid of the Job]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in maps]]>
+ </doc>
+ </method>
+ <method name="cleanupProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in cleanup]]>
+ </doc>
+ </method>
+ <method name="setupProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in setup]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in reduce]]>
+ </doc>
+ </method>
+ <method name="getRunState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return running state of the job]]>
+ </doc>
+ </method>
+ <method name="setRunState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Change the current run state of the job.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return start time of the job]]>
+ </doc>
+ </method>
+ <method name="clone" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUsername" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the username of the job]]>
+ </doc>
+ </method>
+ <method name="getSchedulingInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the Scheduling information associated to a particular Job.
+ @return the scheduling information of the job]]>
+ </doc>
+ </method>
+ <method name="setSchedulingInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="schedulingInfo" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Used to set the scheduling information associated to a particular Job.
+
+ @param schedulingInfo Scheduling information of the job]]>
+ </doc>
+ </method>
+ <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the priority of the job
+ @return job priority]]>
+ </doc>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jp" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Set the priority of the job, defaulting to NORMAL.
+ @param jp new job priority]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SUCCEEDED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PREP" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="KILLED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Describes the current status of a job. This is
+ not intended to be a comprehensive piece of data.
+ For that, look at JobProfile.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobStatus -->
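+ <!-- Illustrative Java sketch: reading progress and run state from a JobStatus;
+      tracker and jobid are assumed to be in scope (see the JobTracker entries below).
+
+      JobStatus status = tracker.getJobStatus(jobid); // tracker: a JobTracker, jobid: a JobID
+      if (status.getRunState() == JobStatus.RUNNING) {
+          float done = (status.mapProgress() + status.reduceProgress()) / 2.0f;
+      }
+ -->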
+ <!-- start class org.apache.hadoop.mapred.JobTracker -->
+ <class name="JobTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.InterTrackerProtocol"/>
+ <implements name="org.apache.hadoop.mapred.JobSubmissionProtocol"/>
+ <implements name="org.apache.hadoop.mapred.TaskTrackerManager"/>
+ <method name="startTracker" return="org.apache.hadoop.mapred.JobTracker"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker with given configuration.
+
+ The conf will be modified to reflect the actual ports on which
+ the JobTracker is up and running if the user passes the port as
+ <code>zero</code>.
+
+ @param conf configuration for the JobTracker.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hasRestarted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Whether the JT has restarted]]>
+ </doc>
+ </method>
+ <method name="hasRecovered" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Whether the JT has recovered upon restart]]>
+ </doc>
+ </method>
+ <method name="getRecoveryDuration" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[How long the jobtracker took to recover from restart.]]>
+ </doc>
+ </method>
+ <method name="getInstrumentationClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.JobTrackerInstrumentation&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setInstrumentationClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="t" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.JobTrackerInstrumentation&gt;"/>
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Run forever]]>
+ </doc>
+ </method>
+ <method name="getTotalSubmissions" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobTrackerMachine" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTrackerIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the unique identifier (i.e. the timestamp) of this job tracker start.
+ @return a string with a unique identifier]]>
+ </doc>
+ </method>
+ <method name="getTrackerPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="runningJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRunningJobs" return="java.util.List&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version that is called from a timer thread, and therefore needs to be
+ careful to synchronize.]]>
+ </doc>
+ </method>
+ <method name="failedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="completedJobs" return="java.util.Vector&lt;org.apache.hadoop.mapred.JobInProgress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="taskTrackers" return="java.util.Collection&lt;org.apache.hadoop.mapred.TaskTrackerStatus&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskTracker" return="org.apache.hadoop.mapred.TaskTrackerStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trackerID" type="java.lang.String"/>
+ </method>
+ <method name="resolveAndAddToTopology" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getNodesAtMaxLevel" return="java.util.Collection&lt;org.apache.hadoop.net.Node&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a collection of nodes at the max level]]>
+ </doc>
+ </method>
+ <method name="getParentNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <param name="level" type="int"/>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the Node in the network topology that corresponds to the hostname]]>
+ </doc>
+ </method>
+ <method name="getNumTaskCacheLevels" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumResolvedTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumberOfUniqueHosts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="addJobInProgressListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="listener" type="org.apache.hadoop.mapred.JobInProgressListener"/>
+ </method>
+ <method name="removeJobInProgressListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="listener" type="org.apache.hadoop.mapred.JobInProgressListener"/>
+ </method>
+ <method name="getQueueManager" return="org.apache.hadoop.mapred.QueueManager"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the {@link QueueManager} associated with the JobTracker.]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="heartbeat" return="org.apache.hadoop.mapred.HeartbeatResponse"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskTrackerStatus"/>
+ <param name="initialContact" type="boolean"/>
+ <param name="acceptNewTasks" type="boolean"/>
+ <param name="responseId" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The periodic heartbeat mechanism between the {@link TaskTracker} and
+ the {@link JobTracker}.
+
+ The {@link JobTracker} processes the status information sent by the
+ {@link TaskTracker} and responds with instructions to start/stop
+ tasks or jobs, and also 'reset' instructions during contingencies.]]>
+ </doc>
+ </method>
+ <method name="getNextHeartbeatInterval" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Calculates the next heartbeat interval using the cluster size.
+ The heartbeat interval is incremented by 1 second for every 50 nodes.
+ @return next heartbeat interval.]]>
+ </doc>
+ </method>
+ <method name="getFilesystemName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab the local fs name]]>
+ </doc>
+ </method>
+ <method name="reportTaskTrackerError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTracker" type="java.lang.String"/>
+ <param name="errorClass" type="java.lang.String"/>
+ <param name="errorMessage" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNewJobId" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Allocates a new JobId string.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[JobTracker.submitJob() kicks off a new job.
+
+ Create a 'JobInProgress' object, which contains both JobProfile
+ and JobStatus. Those two sub-objects are sometimes shipped outside
+ of the JobTracker. But JobInProgress adds info that's useful for
+ the JobTracker alone.]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="priority" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the priority of a job
+ @param jobid id of the job
+ @param priority new priority of the job]]>
+ </doc>
+ </method>
+ <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getCleanupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getSetupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxEvents" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the diagnostics for a given task
+ @param taskId the id of the task
+ @return an array of the diagnostic messages]]>
+ </doc>
+ </method>
+ <method name="getTip" return="org.apache.hadoop.mapred.TaskInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tipid" type="org.apache.hadoop.mapred.TaskID"/>
+ <doc>
+ <![CDATA[Returns specified TaskInProgress, or null.]]>
+ </doc>
+ </method>
+ <method name="killTask" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a Task to be killed]]>
+ </doc>
+ </method>
+ <method name="getAssignedTracker" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Get tracker name for a given task id.
+ @param taskId the name of the task
+ @return The name of the task tracker]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSystemDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir()]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.JobInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Get the localized job file path on the job tracker's local file system
+ @param jobId id of the job
+ @return the path of the job conf file on the local file system]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker process. This is used only for debugging. As a rule,
+ JobTracker should be run as part of the DFS Namenode process.]]>
+ </doc>
+ </method>
+ <method name="getQueues" return="org.apache.hadoop.mapred.JobQueueInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getQueueInfo" return="org.apache.hadoop.mapred.JobQueueInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queue" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getJobsFromQueue" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queue" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[JobTracker is the central location for submitting and
+ tracking MR jobs in a network environment.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker -->
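+ <!-- Illustrative Java sketch: the debugging entry point described above; normally
+      main() does this, shown here only to connect startTracker and offerService.
+
+      JobConf conf = new JobConf();
+      JobTracker tracker = JobTracker.startTracker(conf); // ports in conf updated if passed as zero
+      tracker.offerService();                             // runs forever; throws IOException/InterruptedException
+ -->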
+ <!-- start class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <class name="JobTracker.IllegalStateException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobTracker.IllegalStateException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A client tried to submit a job before the Job Tracker was ready.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <!-- start class org.apache.hadoop.mapred.JobTracker.State -->
+ <class name="JobTracker.State" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.JobTracker.State&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobTracker.State[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.State -->
+ <!-- start class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+ <class name="KeyValueLineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="findSeparator" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="sep" type="byte"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a key/value pair from a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class treats a line in the input as a key/value pair separated by a
+ separator character. The separator can be specified in the config file
+ under the attribute name key.value.separator.in.input.line. The default
+ separator is the tab character ('\t').]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
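+ <!-- Illustrative Java sketch: driving the reader by hand; conf and split are
+      assumed to be set up by the input format in real use.
+
+      KeyValueLineRecordReader reader = new KeyValueLineRecordReader(conf, split);
+      Text key = reader.createKey();
+      Text value = reader.createValue();
+      while (reader.next(key, value)) { /* key = text before the separator, value = rest */ }
+      reader.close();
+ -->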
+ <!-- start class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
+ <class name="KeyValueTextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="KeyValueTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return is used to signal the end of a line. Each
+ line is divided into key and value parts by a separator byte. If no such
+ byte exists, the key will be the entire line and the value will be empty.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
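+ <!-- A minimal job-setup sketch for the class above, assuming
+ comma-separated input; the property name comes from the
+ KeyValueLineRecordReader doc earlier in this file.
+
+ JobConf conf = new JobConf();
+ // Use ',' instead of the default tab as the key/value separator.
+ conf.set("key.value.separator.in.input.line", ",");
+ conf.setInputFormat(KeyValueTextInputFormat.class);
+ -->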
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader -->
+ <class name="LineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="LineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.LongWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.LongWritable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the progress within the split]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Treats keys as offsets in the file and values as lines.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
+ <class name="LineRecordReader.LineReader" extends="org.apache.hadoop.util.LineReader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.util.LineReader} instead.">
+ <constructor name="LineRecordReader.LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <doc>
+ <![CDATA[A class that provides a line reader from an input stream.
+ @deprecated Use {@link org.apache.hadoop.util.LineReader} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
+ <!-- start class org.apache.hadoop.mapred.MapFileOutputFormat -->
+ <class name="MapFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.MapFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getEntry" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readers" type="org.apache.hadoop.io.MapFile.Reader[]"/>
+ <param name="partitioner" type="org.apache.hadoop.mapred.Partitioner&lt;K, V&gt;"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get an entry from output generated by this class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link MapFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapFileOutputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.Mapper -->
+ <interface name="Mapper" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1"/>
+ <param name="value" type="V1"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Maps a single input key/value pair into an intermediate key/value pair.
+
+ <p>Output pairs need not be of the same types as input pairs. A given
+ input pair may map to zero or many output pairs. Output pairs are
+ collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes a significant amount of time to process individual key/value
+ pairs, this is crucial since the framework might otherwise assume that
+ the task has timed out and kill it. Another way of avoiding this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the input key.
+ @param value the input value.
+ @param output collects mapped keys and values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Maps input key/value pairs to a set of intermediate key/value pairs.
+
+ <p>Maps are the individual tasks which transform input records into
+ intermediate records. The transformed intermediate records need not be of
+ the same type as the input records. A given input pair may map to zero or
+ many output pairs.</p>
+
+ <p>The Hadoop Map-Reduce framework spawns one map task for each
+ {@link InputSplit} generated by the {@link InputFormat} for the job.
+ <code>Mapper</code> implementations can access the {@link JobConf} for the
+ job via the {@link JobConfigurable#configure(JobConf)} method and initialize
+ themselves. Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p>The framework then calls
+ {@link #map(Object, Object, OutputCollector, Reporter)}
+ for each key/value pair in the <code>InputSplit</code> for that task.</p>
+
+ <p>All intermediate values associated with a given output key are
+ subsequently grouped by the framework, and passed to a {@link Reducer} to
+ determine the final output. Users can control the grouping by specifying
+ a <code>Comparator</code> via
+ {@link JobConf#setOutputKeyComparatorClass(Class)}.</p>
+
+ <p>The grouped <code>Mapper</code> outputs are partitioned per
+ <code>Reducer</code>. Users can control which keys (and hence records) go to
+ which <code>Reducer</code> by implementing a custom {@link Partitioner}.</p>
+
+ <p>Users can optionally specify a <code>combiner</code>, via
+ {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the
+ intermediate outputs, which helps to cut down the amount of data transferred
+ from the <code>Mapper</code> to the <code>Reducer</code>.</p>
+
+ <p>The intermediate, grouped outputs are always stored in
+ {@link SequenceFile}s. Applications can specify if and how the intermediate
+ outputs are to be compressed and which {@link CompressionCodec}s are to be
+ used via the <code>JobConf</code>.</p>
+
+ <p>If the job has
+ <a href="{@docRoot}/org/apache/hadoop/mapred/JobConf.html#ReducerNone">zero
+ reduces</a> then the output of the <code>Mapper</code> is directly written
+ to the {@link FileSystem} without grouping by keys.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyMapper&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Mapper&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String mapTaskId;
+ private String inputFile;
+ private int noRecords = 0;
+
+ public void configure(JobConf job) {
+ mapTaskId = job.get("mapred.task.id");
+ inputFile = job.get("mapred.input.file");
+ }
+
+ public void map(K key, V val,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ reporter.progress();
+
+ // Process some more
+ // ...
+ // ...
+
+ // Increment the no. of &lt;key, value&gt; pairs processed
+ ++noRecords;
+
+ // Increment counters
+ reporter.incrCounter(NUM_RECORDS, 1);
+
+ // Every 100 records update application-level status
+ if ((noRecords%100) == 0) {
+ reporter.setStatus(mapTaskId + " processed " + noRecords +
+ " from input-file: " + inputFile);
+ }
+
+ // Output the result
+ output.collect(key, val);
+ }
+ }
+ </pre></blockquote></p>
+
+ <p>Applications may write a custom {@link MapRunnable} to exert greater
+ control on map processing e.g. multi-threaded <code>Mapper</code>s etc.</p>
+
+ @see JobConf
+ @see InputFormat
+ @see Partitioner
+ @see Reducer
+ @see MapReduceBase
+ @see MapRunnable
+ @see SequenceFile]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Mapper -->
+ <!-- start class org.apache.hadoop.mapred.MapReduceBase -->
+ <class name="MapReduceBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="MapReduceBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for {@link Mapper} and {@link Reducer} implementations.
+
+ <p>Provides default no-op implementations for a few methods that most
+ non-trivial applications need to override.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapReduceBase -->
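+ <!-- A minimal sketch of extending MapReduceBase above: only configure is
+ overridden, while close falls back to the no-op default. The class name
+ and the my.tag property are hypothetical.
+
+ public class TaggingMapper extends MapReduceBase
+     implements Mapper<LongWritable, Text, Text, IntWritable> {
+   private String tag;
+   public void configure(JobConf job) {
+     tag = job.get("my.tag", "");          // my.tag is a made-up property
+   }
+   public void map(LongWritable key, Text value,
+                   OutputCollector<Text, IntWritable> output,
+                   Reporter reporter) throws IOException {
+     output.collect(new Text(tag + value.toString()), new IntWritable(1));
+   }
+ }
+ -->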
+ <!-- start interface org.apache.hadoop.mapred.MapRunnable -->
+ <interface name="MapRunnable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start mapping input <tt>&lt;key, value&gt;</tt> pairs.
+
+ <p>Mapping of input records to output records is complete when this method
+ returns.</p>
+
+ @param input the {@link RecordReader} to read the input records.
+ @param output the {@link OutputCollector} to collect the output records.
+ @param reporter {@link Reporter} to report progress, status-updates etc.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Expert: Generic interface for {@link Mapper}s.
+
+ <p>Custom implementations of <code>MapRunnable</code> can exert greater
+ control on map processing e.g. multi-threaded, asynchronous mappers etc.</p>
+
+ @see Mapper]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.MapRunnable -->
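+ <!-- A minimal sketch of a MapRunnable run loop, essentially what the
+ default MapRunner below does; assumes a mapper field initialized in
+ configure(JobConf).
+
+ public void run(RecordReader<K1, V1> input, OutputCollector<K2, V2> output,
+                 Reporter reporter) throws IOException {
+   K1 key = input.createKey();
+   V1 value = input.createValue();
+   while (input.next(key, value)) {
+     // Delegate each record; key/value objects are reused by the reader.
+     mapper.map(key, value, output, reporter);
+   }
+ }
+ -->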
+ <!-- start class org.apache.hadoop.mapred.MapRunner -->
+ <class name="MapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMapper" return="org.apache.hadoop.mapred.Mapper&lt;K1, V1, K2, V2&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Default {@link MapRunnable} implementation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapRunner -->
+ <!-- start class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <class name="MultiFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultiFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An abstract {@link InputFormat} that returns {@link MultiFileSplit}s
+ from the {@link #getSplits(JobConf, int)} method. Splits are constructed from
+ the files under the input paths. Each returned split contains <i>nearly</i>
+ equal content length. <br>
+ Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)}
+ to construct <code>RecordReader</code>s for <code>MultiFileSplit</code>s.
+ @see MultiFileSplit]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.MultiFileSplit -->
+ <class name="MultiFileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="MultiFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLengths" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array containing the lengths of the files in
+ the split]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the length of the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getNumPaths" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all the Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A sub-collection of input files. Unlike {@link FileSplit}, the
+ MultiFileSplit class does not represent a split of a file, but a split of
+ input files into smaller sets. The atomic unit of the split is a file. <br>
+ MultiFileSplit can be used to implement {@link RecordReader}s that read
+ one record per file.
+ @see FileSplit
+ @see MultiFileInputFormat]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileSplit -->
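+ <!-- A minimal sketch of walking a MultiFileSplit, using only the
+ accessors listed above; the split would be handed in by the framework.
+
+ for (int i = 0; i < split.getNumPaths(); i++) {
+   Path p = split.getPath(i);              // the i-th file in this split
+   long len = split.getLength(i);          // its length in bytes
+   // open p and read one record per file, per the class doc above
+ }
+ -->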
+ <!-- start interface org.apache.hadoop.mapred.OutputCollector -->
+ <interface name="OutputCollector" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="collect"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Adds a key/value pair to the output.
+
+ @param key the key to collect.
+ @param value the value to collect.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Collects the <code>&lt;key, value&gt;</code> pairs output by {@link Mapper}s
+ and {@link Reducer}s.
+
+ <p><code>OutputCollector</code> is the generalization of the facility
+ provided by the Map-Reduce framework to collect data output by either the
+ <code>Mapper</code> or the <code>Reducer</code>, i.e. intermediate outputs
+ or the output of the job.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputCollector -->
+ <!-- start class org.apache.hadoop.mapred.OutputCommitter -->
+ <class name="OutputCommitter" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputCommitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setupJob"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For the framework to set up the job output during initialization.
+
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException if temporary output could not be created]]>
+ </doc>
+ </method>
+ <method name="cleanupJob"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For cleaning up the job's output after job completion
+
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setupTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets up output for the task.
+
+ @param taskContext Context of the task whose output is being written.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="needsTaskCommit" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check whether the task needs a commit.
+
+ @param taskContext Context of the task whose output is being written.
+ @return <code>true</code> if the task needs a commit, <code>false</code> otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="commitTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[To promote the task's temporary output to the final output location.
+
+ The task's output is moved to the job's output directory.
+
+ @param taskContext Context of the task whose output is being written.
+ @throws IOException if the commit is not successful]]>
+ </doc>
+ </method>
+ <method name="abortTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Discard the task output.
+
+ @param taskContext Context of the task whose output is being discarded.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputCommitter</code> describes the commit of task output for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputCommitter</code> of
+ the job to:</p>
+ <ol>
+ <li>
+ Setup the job during initialization. For example, create the temporary
+ output directory for the job during the initialization of the job.
+ </li>
+ <li>
+ Cleanup the job after the job completion. For example, remove the
+ temporary output directory after the job completion.
+ </li>
+ <li>
+ Setup the task temporary output.
+ </li>
+ <li>
+ Check whether a task needs a commit. This is to avoid the commit
+ procedure if a task does not need one.
+ </li>
+ <li>
+ Commit of the task output.
+ </li>
+ <li>
+ Discard the task output on abort.
+ </li>
+ </ol>
+
+ @see FileOutputCommitter
+ @see JobContext
+ @see TaskAttemptContext]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputCommitter -->
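+ <!-- A minimal sketch of a committer that never promotes output; every
+ method must be implemented because all six are abstract above. The class
+ name is illustrative.
+
+ public class DiscardingOutputCommitter extends OutputCommitter {
+   public void setupJob(JobContext jobContext) throws IOException { }
+   public void cleanupJob(JobContext jobContext) throws IOException { }
+   public void setupTask(TaskAttemptContext taskContext) throws IOException { }
+   public boolean needsTaskCommit(TaskAttemptContext taskContext)
+       throws IOException {
+     return false;                         // nothing to promote
+   }
+   public void commitTask(TaskAttemptContext taskContext) throws IOException { }
+   public void abortTask(TaskAttemptContext taskContext) throws IOException { }
+ }
+ -->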
+ <!-- start interface org.apache.hadoop.mapred.OutputFormat -->
+ <interface name="OutputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordWriter} for the given job.
+
+ @param ignored
+ @param job configuration for the job whose output is being written.
+ @param name the unique name for this part of the output.
+ @param progress mechanism for reporting progress while writing to file.
+ @return a {@link RecordWriter} to write the output for the job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check for validity of the output-specification for the job.
+
+ <p>This is to validate the output specification for the job when the
+ job is submitted. Typically this checks that the output does not already
+ exist, throwing an exception if it does, so that output is not
+ overwritten.</p>
+
+ @param ignored
+ @param job job configuration.
+ @throws IOException when output should not be attempted]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputFormat</code> describes the output-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the output-specification of the job, e.g. check that the
+ output directory doesn't already exist.
+ </li>
+ <li>
+ Provide the {@link RecordWriter} implementation to be used to write out
+ the output files of the job. Output files are stored in a
+ {@link FileSystem}.
+ </li>
+ </ol>
+
+ @see RecordWriter
+ @see JobConf]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputFormat -->
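+ <!-- A minimal sketch of the checkOutputSpecs contract above, as it might
+ look in a FileOutputFormat-style implementation: refuse to run when the
+ output directory already exists.
+
+ public void checkOutputSpecs(FileSystem ignored, JobConf job)
+     throws IOException {
+   Path out = FileOutputFormat.getOutputPath(job);
+   if (out != null && out.getFileSystem(job).exists(out)) {
+     throw new IOException("Output directory " + out + " already exists");
+   }
+ }
+ -->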
+ <!-- start class org.apache.hadoop.mapred.OutputLogFilter -->
+ <class name="OutputLogFilter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.PathFilter"/>
+ <constructor name="OutputLogFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <doc>
+ <![CDATA[This class filters log files out of the given directory.
+ It does not accept paths containing _logs.
+ This can be used to list the paths of an output directory as follows:
+ Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
+ new OutputLogFilter()));]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputLogFilter -->
+ <!-- start interface org.apache.hadoop.mapred.Partitioner -->
+ <interface name="Partitioner" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numPartitions" type="int"/>
+ <doc>
+ <![CDATA[Get the partition number for a given key (hence record) given the total
+ number of partitions, i.e. the number of reduce-tasks for the job.
+
+ <p>Typically a hash function on all or a subset of the key.</p>
+
+ @param key the key to be partitioned.
+ @param value the entry value.
+ @param numPartitions the total number of partitions.
+ @return the partition number for the <code>key</code>.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partitions the key space.
+
+ <p><code>Partitioner</code> controls the partitioning of the keys of the
+ intermediate map-outputs. The key (or a subset of the key) is used to derive
+ the partition, typically by a hash function. The total number of partitions
+ is the same as the number of reduce tasks for the job. Hence this controls
+ which of the <code>m</code> reduce tasks the intermediate key (and hence the
+ record) is sent for reduction.</p>
+
+ @see Reducer]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Partitioner -->
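+ <!-- A minimal sketch of the hash-style getPartition described above;
+ masking the sign bit keeps the modulus non-negative.
+
+ public int getPartition(K2 key, V2 value, int numPartitions) {
+   return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
+ }
+ public void configure(JobConf job) { }    // no configuration needed
+ -->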
+ <!-- start interface org.apache.hadoop.mapred.RecordReader -->
+ <interface name="RecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the next key/value pair from the input for processing.
+
+ @param key the key to read data into
+ @param value the value to read data into
+ @return true iff a key/value was read, false if at EOF]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a key.
+
+ @return a new key object.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a value.
+
+ @return a new value object.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current position in the input.
+
+ @return the current position in the input.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this {@link RecordReader} to future operations.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[How much of the input the {@link RecordReader} has consumed,
+ i.e. how much of it has been processed.
+
+ @return progress from <code>0.0</code> to <code>1.0</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordReader</code> reads &lt;key, value&gt; pairs from an
+ {@link InputSplit}.
+
+ <p><code>RecordReader</code>, typically, converts the byte-oriented view of
+ the input, provided by the <code>InputSplit</code>, and presents a
+ record-oriented view for the {@link Mapper} & {@link Reducer} tasks for
+ processing. It thus assumes the responsibility of processing record
+ boundaries and presenting the tasks with keys and values.</p>
+
+ @see InputSplit
+ @see InputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordReader -->
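+ <!-- A minimal sketch of the consumption loop implied above, driving any
+ RecordReader<K, V>; process(...) is a hypothetical callback.
+
+ K key = reader.createKey();
+ V value = reader.createValue();
+ while (reader.next(key, value)) {
+   // The same key/value objects are refilled on every iteration,
+   // so copy them if they must outlive the loop.
+   process(key, value);
+ }
+ reader.close();
+ -->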
+ <!-- start interface org.apache.hadoop.mapred.RecordWriter -->
+ <interface name="RecordWriter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes a key/value pair.
+
+ @param key the key to write.
+ @param value the value to write.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this <code>RecordWriter</code> to future operations.
+
+ @param reporter facility to report progress.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordWriter</code> writes the output &lt;key, value&gt; pairs
+ to an output file.
+
+ <p><code>RecordWriter</code> implementations write the job outputs to the
+ {@link FileSystem}.
+
+ @see OutputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordWriter -->
+ <!-- start interface org.apache.hadoop.mapred.Reducer -->
+ <interface name="Reducer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="values" type="java.util.Iterator&lt;V2&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K3, V3&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<i>Reduces</i> values for a given key.
+
+ <p>The framework calls this method for each
+ <code>&lt;key, (list of values)></code> pair in the grouped inputs.
+ Output values must be of the same type as input values. Input keys must
+ not be altered. The framework will <b>reuse</b> the key and value objects
+ that are passed into the reduce, therefore the application should clone
+ the objects they want to keep a copy of. In many cases, all values are
+ combined into zero or one value.
+ </p>
+
+ <p>Output pairs are collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes a significant amount of time to process individual key/value
+ pairs, this is crucial since the framework might otherwise assume that
+ the task has timed out and kill it. Another way of avoiding this is to set
+ <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the key.
+ @param values the list of values to reduce.
+ @param output to collect keys and combined values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reduces a set of intermediate values which share a key to a smaller set of
+ values.
+
+ <p>The number of <code>Reducer</code>s for the job is set by the user via
+ {@link JobConf#setNumReduceTasks(int)}. <code>Reducer</code> implementations
+ can access the {@link JobConf} for the job via the
+ {@link JobConfigurable#configure(JobConf)} method and initialize themselves.
+ Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p><code>Reducer</code> has 3 primary phases:</p>
+ <ol>
+ <li>
+
+ <h4 id="Shuffle">Shuffle</h4>
+
+ <p>The <code>Reducer</code> is given the grouped output of a {@link Mapper}.
+ In this phase the framework, for each <code>Reducer</code>, fetches the
+ relevant partition of the output of all the <code>Mapper</code>s via HTTP.
+ </p>
+ </li>
+
+ <li>
+ <h4 id="Sort">Sort</h4>
+
+ <p>The framework groups <code>Reducer</code> inputs by <code>key</code>s
+ (since different <code>Mapper</code>s may have output the same key) in this
+ stage.</p>
+
+ <p>The shuffle and sort phases occur simultaneously i.e. while outputs are
+ being fetched they are merged.</p>
+
+ <h5 id="SecondarySort">SecondarySort</h5>
+
+ <p>If equivalence rules for keys while grouping the intermediates are
+ different from those for grouping keys before reduction, then one may
+ specify a <code>Comparator</code> via
+ {@link JobConf#setOutputValueGroupingComparator(Class)}. Since
+ {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to
+ control how intermediate keys are sorted, these can be used in conjunction
+ to simulate <i>secondary sort on values</i>.</p>
+
+
+ For example, say that you want to find duplicate web pages and tag them
+ all with the url of the "best" known example. You would set up the job
+ like:
+ <ul>
+ <li>Map Input Key: url</li>
+ <li>Map Input Value: document</li>
+ <li>Map Output Key: document checksum, url pagerank</li>
+ <li>Map Output Value: url</li>
+ <li>Partitioner: by checksum</li>
+ <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
+ <li>OutputValueGroupingComparator: by checksum</li>
+ </ul>
+ </li>
+
+ <li>
+ <h4 id="Reduce">Reduce</h4>
+
+ <p>In this phase the
+ {@link #reduce(Object, Iterator, OutputCollector, Reporter)}
+ method is called for each <code>&lt;key, (list of values)></code> pair in
+ the grouped inputs.</p>
+ <p>The output of the reduce task is typically written to the
+ {@link FileSystem} via
+ {@link OutputCollector#collect(Object, Object)}.</p>
+ </li>
+ </ol>
+
+ <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyReducer&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Reducer&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String reduceTaskId;
+ private int noKeys = 0;
+
+ public void configure(JobConf job) {
+ reduceTaskId = job.get("mapred.task.id");
+ }
+
+ public void reduce(K key, Iterator&lt;V&gt; values,
+ OutputCollector&lt;K, V&gt; output,
+ Reporter reporter)
+ throws IOException {
+
+ // Process
+ int noValues = 0;
+ while (values.hasNext()) {
+ V value = values.next();
+
+ // Increment the no. of values for this key
+ ++noValues;
+
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ if ((noValues%10) == 0) {
+ reporter.progress();
+ }
+
+ // Process some more
+ // ...
+ // ...
+
+ // Output the &lt;key, value&gt;
+ output.collect(key, value);
+ }
+
+ // Increment the no. of &lt;key, list of values&gt; pairs processed
+ ++noKeys;
+
+ // Increment counters
+ reporter.incrCounter(NUM_RECORDS, 1);
+
+ // Every 100 keys update application-level status
+ if ((noKeys%100) == 0) {
+ reporter.setStatus(reduceTaskId + " processed " + noKeys);
+ }
+ }
+ }
+ </pre></blockquote></p>
+
+ @see Mapper
+ @see Partitioner
+ @see Reporter
+ @see MapReduceBase]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reducer -->
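+ <!-- A minimal job-setup sketch for the SecondarySort recipe above; the
+ three partitioner/comparator classes are hypothetical user classes.
+
+ JobConf job = new JobConf();
+ job.setPartitionerClass(ChecksumPartitioner.class);
+ job.setOutputKeyComparatorClass(ChecksumThenPageRankComparator.class);
+ job.setOutputValueGroupingComparator(ChecksumComparator.class);
+ -->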
+ <!-- start interface org.apache.hadoop.mapred.Reporter -->
+ <interface name="Reporter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Progressable"/>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the status description for the task.
+
+ @param status brief description of the current status.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the {@link Counter} of the given group with the given name.
+
+ @param group counter group
+ @param name counter name
+ @return the <code>Counter</code> of the given group/name.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the key, which can be of
+ any {@link Enum} type, by the specified amount.
+
+ @param key key to identify the counter to be incremented. The key can
+ be any <code>Enum</code>.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the group and counter name
+ by the specified amount.
+
+ @param group name to identify the group of the counter to be incremented.
+ @param counter name to identify the counter within the group.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="getInputSplit" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+ <doc>
+ <![CDATA[Get the {@link InputSplit} object for a map.
+
+ @return the <code>InputSplit</code> that the map is reading from.
+ @throws UnsupportedOperationException if called outside a mapper]]>
+ </doc>
+ </method>
+ <field name="NULL" type="org.apache.hadoop.mapred.Reporter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A constant of Reporter type that does nothing.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A facility for Map-Reduce applications to report progress and update
+ counters, status information etc.
+
+ <p>{@link Mapper} and {@link Reducer} can use the <code>Reporter</code>
+ provided to report progress or just indicate that they are alive. In
+ scenarios where the application takes a significant amount of time to
+ process individual key/value pairs, this is crucial since the framework
+ might otherwise assume that the task has timed out and kill it.</p>
+
+ <p>Applications can also update {@link Counters} via the provided
+ <code>Reporter</code>.</p>
+
+ @see Progressable
+ @see Counters]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reporter -->
+ <!-- start interface org.apache.hadoop.mapred.RunningJob -->
+ <interface name="RunningJob" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job identifier.
+
+ @return the job identifier.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.">
+ <doc>
+ <![CDATA[@deprecated This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the job.
+
+ @return the name of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the path of the submitted job configuration.
+
+ @return the path of the submitted job configuration.]]>
+ </doc>
+ </method>
+ <method name="getTrackingURL" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the URL where some job progress information will be displayed.
+
+ @return the URL where some job progress information will be displayed.]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's map-tasks, as a float between 0.0
+ and 1.0. When all map tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's map-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0
+ and 1.0. When all reduce tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's reduce-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cleanupProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's cleanup-tasks, as a float between 0.0
+ and 1.0. When all cleanup tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's cleanup-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setupProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's setup-tasks, as a float between 0.0
+ and 1.0. When all setup tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's setup-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isComplete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job is finished or not.
+ This is a non-blocking call.
+
+ @return <code>true</code> if the job is complete, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isSuccessful" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job completed successfully.
+
+ @return <code>true</code> if the job succeeded, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="waitForCompletion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Blocks until the job is complete.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJobState" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current state of the Job, as defined in
+ {@link JobStatus}.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill the running job. Blocks until all job tasks have been
+ killed as well. If the job is no longer running, it simply returns.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="priority" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the priority of a running job.
+ @param priority the new priority for the job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="startFrom" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get events indicating completion (success/failure) of component tasks.
+
+ @param startFrom index to start fetching events from
+ @return an array of {@link TaskCompletionEvent}s
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill the indicated task attempt.
+
+ @param taskId the id of the task to be terminated.
+ @param shouldFail if true the task is failed and added to the failed tasks
+ list, otherwise it is just killed, without affecting
+ the job failure status.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #killTask(TaskAttemptID, boolean)}">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the counters for this job.
+
+ @return the counters for this job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RunningJob</code> is the user interface for querying details of a
+ running Map-Reduce job.
+
+ <p>Clients can get hold of <code>RunningJob</code> via the {@link JobClient}
+ and then query the running job for details such as name, configuration,
+ progress, etc.</p>
+
+ @see JobClient]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RunningJob -->
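+ <!-- Usage sketch for RunningJob (illustrative only; the WordCount job class and
+ the surrounding method that declares IOException are assumptions):
+
+ JobConf conf = new JobConf(WordCount.class); // hypothetical job class
+ JobClient client = new JobClient(conf);
+ RunningJob job = client.submitJob(conf);
+ job.waitForCompletion(); // blocks until the job finishes
+ if (job.isSuccessful()) {
+ TaskCompletionEvent[] events = job.getTaskCompletionEvents(0);
+ System.out.println("Job succeeded with " + events.length + " completion events");
+ }
+ -->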
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <class name="SequenceFileAsBinaryInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat that reads keys and values from SequenceFiles in
+ binary (raw) format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
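+ <!-- Usage sketch (illustrative; assumes conf is a JobConf configured elsewhere):
+
+ conf.setInputFormat(SequenceFileAsBinaryInputFormat.class);
+ // the mapper then receives BytesWritable key/value pairs containing the raw
+ // serialized bytes, regardless of the SequenceFile's declared key/value types
+ -->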
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <class name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"/>
+ <constructor name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the key class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the value class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.BytesWritable"/>
+ <param name="val" type="org.apache.hadoop.io.BytesWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read raw bytes from a SequenceFile.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Read records from a SequenceFile as binary (raw) bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
+ <class name="SequenceFileAsBinaryOutputFormat" extends="org.apache.hadoop.mapred.SequenceFileOutputFormat&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setSequenceFileOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the key class for the {@link SequenceFile}.
+ <p>This allows the user to specify the key class to be different
+ from the actual class ({@link BytesWritable}) used for writing.</p>
+
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output key class.]]>
+ </doc>
+ </method>
+ <method name="setSequenceFileOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Set the value class for the {@link SequenceFile}.
+ <p>This allows the user to specify the value class to be different
+ from the actual class ({@link BytesWritable}) used for writing.</p>
+
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output value class.]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputKeyClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the key class for the {@link SequenceFile}
+
+ @return the key class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputValueClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the value class for the {@link SequenceFile}
+
+ @return the value class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes keys and values to
+ {@link SequenceFile}s in binary (raw) format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
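+ <!-- Usage sketch (illustrative; IntWritable/Text are example type choices):
+
+ conf.setOutputFormat(SequenceFileAsBinaryOutputFormat.class);
+ SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(conf, IntWritable.class);
+ SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(conf, Text.class);
+ // the job emits BytesWritable pairs, but readers of the resulting
+ // SequenceFile will see IntWritable/Text as its key/value classes
+ -->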
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <class name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" type="org.apache.hadoop.io.BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.BytesWritable"/>
+ </method>
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Inner class used for appendRaw]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+ <class name="SequenceFileAsTextInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is similar to SequenceFileInputFormat, except that it generates
+ SequenceFileAsTextRecordReader, which converts the input keys and values to their
+ String forms by calling their toString() methods.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
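+ <!-- Usage sketch (illustrative; assumes conf is a JobConf):
+
+ conf.setInputFormat(SequenceFileAsTextInputFormat.class);
+ // map(Text key, Text value, ...) now receives the toString() forms of the
+ // original SequenceFile keys and values
+ -->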
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <class name="SequenceFileAsTextRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="SequenceFileAsTextRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class converts the input keys and values to their String forms by calling
+ their toString() methods. This class is to SequenceFileAsTextInputFormat what
+ LineRecordReader is to TextInputFormat.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+ <class name="SequenceFileInputFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a record reader for the given split.
+ @param split file split
+ @param job job configuration
+ @param reporter reporter that sends reports to the task tracker
+ @return RecordReader]]>
+ </doc>
+ </method>
+ <method name="setFilterClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="filterClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the filter class.
+
+ @param conf application configuration
+ @param filterClass filter class]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that allows a map/reduce job to work on a sample of sequence files.
+ The sample is decided by the filter class set by the job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter -->
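+ <!-- Usage sketch (illustrative): wiring a filter into a job. The concrete
+ filters (MD5Filter, PercentFilter, RegexFilter) are documented below:
+
+ conf.setInputFormat(SequenceFileInputFilter.class);
+ SequenceFileInputFilter.setFilterClass(conf, SequenceFileInputFilter.PercentFilter.class);
+ -->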
+ <!-- start interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <interface name="SequenceFileInputFilter.Filter" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filter function: decide whether a record should be accepted.
+ @param key record key
+ @return true if the record is accepted; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Filter interface.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <class name="SequenceFileInputFilter.FilterBase" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.SequenceFileInputFilter.Filter"/>
+ <constructor name="SequenceFileInputFilter.FilterBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Base class for filters.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
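+ <!-- Sketch of a custom filter (illustrative; the class name and the even-length
+ criterion are made up for this example):
+
+ public static class EvenLengthKeyFilter extends SequenceFileInputFilter.FilterBase {
+ private Configuration conf;
+ public void setConf(Configuration conf) { this.conf = conf; }
+ public Configuration getConf() { return conf; }
+ public boolean accept(Object key) {
+ return key.toString().length() % 2 == 0; // keep keys of even length
+ }
+ }
+ -->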
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
+ <class name="SequenceFileInputFilter.MD5Filter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.MD5Filter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+ <![CDATA[Set the filtering frequency in the configuration.
+
+ @param conf configuration
+ @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Configure the filter according to the configuration.
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filtering method: returns true if MD5(key) % frequency == 0, and false otherwise.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class returns a set of records by examining the MD5 digest of its
+ key against a filtering frequency <i>f</i>. The filtering criterion is
+ MD5(key) % f == 0.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
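+ <!-- Usage sketch (illustrative; 32 is an example frequency, keeping keys where
+ MD5(key) % 32 == 0):
+
+ SequenceFileInputFilter.setFilterClass(conf, SequenceFileInputFilter.MD5Filter.class);
+ SequenceFileInputFilter.MD5Filter.setFrequency(conf, 32);
+ -->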
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <class name="SequenceFileInputFilter.PercentFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.PercentFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+ <![CDATA[Set the frequency and store it in conf.
+ @param conf configuration
+ @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Configure the filter by checking the configuration.
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filtering method: returns true if record# % frequency == 0, and false otherwise.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class returns a percentage of records.
+ The percentage is determined by a filtering frequency <i>f</i> using
+ the criterion record# % f == 0.
+ For example, if the frequency is 10, one out of every 10 records is returned.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
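+ <!-- Usage sketch (illustrative): keep one record out of every 10:
+
+ SequenceFileInputFilter.setFilterClass(conf, SequenceFileInputFilter.PercentFilter.class);
+ SequenceFileInputFilter.PercentFilter.setFrequency(conf, 10);
+ -->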
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
+ <class name="SequenceFileInputFilter.RegexFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.RegexFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPattern"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="regex" type="java.lang.String"/>
+ <exception name="PatternSyntaxException" type="java.util.regex.PatternSyntaxException"/>
+ <doc>
+ <![CDATA[Define the filtering regex and store it in conf.
+ @param conf where the regex is set
+ @param regex regex used as a filter]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Configure the filter by checking the configuration.]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filtering method: returns true if the key matches the regex, and false otherwise.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A record filter that matches the key against a regex.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
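+ <!-- Usage sketch (illustrative; the regex is an example value):
+
+ SequenceFileInputFilter.setFilterClass(conf, SequenceFileInputFilter.RegexFilter.class);
+ SequenceFileInputFilter.RegexFilter.setPattern(conf, "^user_.*");
+ -->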
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
+ <class name="SequenceFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.SequenceFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf}
+ @return the {@link CompressionType} for the output {@link SequenceFile},
+ defaulting to {@link CompressionType#RECORD}]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf} to modify
+ @param style the {@link CompressionType} for the output
+ {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
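+ <!-- Usage sketch (illustrative; BLOCK is an example choice, the default being
+ RECORD per getOutputCompressionType above):
+
+ conf.setOutputFormat(SequenceFileOutputFormat.class);
+ SequenceFileOutputFormat.setOutputCompressionType(conf,
+ SequenceFile.CompressionType.BLOCK);
+ -->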
+ <!-- start class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+ <class name="SequenceFileRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <constructor name="SequenceFileRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of key that must be passed to {@link
+ #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of value that must be passed to {@link
+ #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="conf" type="org.apache.hadoop.conf.Configuration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link RecordReader} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileRecordReader -->
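+ <!-- Usage sketch (illustrative; assumes split is a FileSplit over a
+ SequenceFile with Text keys and values):
+
+ SequenceFileRecordReader<Text, Text> reader =
+ new SequenceFileRecordReader<Text, Text>(conf, split);
+ Text key = reader.createKey();
+ Text value = reader.createValue();
+ while (reader.next(key, value)) {
+ // process the key/value pair
+ }
+ reader.close();
+ -->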
+ <!-- start class org.apache.hadoop.mapred.SkipBadRecords -->
+ <class name="SkipBadRecords" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SkipBadRecords"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAttemptsToStartSkipping" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the number of task attempts AFTER which skip mode
+ will be kicked off. When skip mode is kicked off, the
+ task reports the range of records which it will process
+ next to the TaskTracker, so that on failures the TaskTracker
+ knows which records are possibly bad. On further executions,
+ those are skipped.
+ Default value is 2.
+
+ @param conf the configuration
+ @return the number of task attempts after which skipping starts]]>
+ </doc>
+ </method>
+ <method name="setAttemptsToStartSkipping"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attemptsToStartSkipping" type="int"/>
+ <doc>
+ <![CDATA[Set the number of task attempts AFTER which skip mode
+ will be kicked off. When skip mode is kicked off, the
+ task reports the range of records which it will process
+ next to the TaskTracker, so that on failures the TaskTracker
+ knows which records are possibly bad. On further executions,
+ those are skipped.
+ Default value is 2.
+
+ @param conf the configuration
+ @param attemptsToStartSkipping the number of task attempts]]>
+ </doc>
+ </method>
+ <method name="getAutoIncrMapperProcCount" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the flag which, if set to true, causes
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} to be incremented
+ by MapRunner after invoking the map function. This value must be set to
+ false for applications which process records asynchronously
+ or buffer the input records, for example streaming applications.
+ In such cases applications should increment this counter on their own.
+ Default value is true.
+
+ @param conf the configuration
+ @return <code>true</code> if auto increment
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}.
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setAutoIncrMapperProcCount"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="autoIncr" type="boolean"/>
+ <doc>
+ <![CDATA[Set the flag which, if set to true, causes
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} to be incremented
+ by MapRunner after invoking the map function. This value must be set to
+ false for applications which process records asynchronously
+ or buffer the input records, for example streaming applications.
+ In such cases applications should increment this counter on their own.
+ Default value is true.
+
+ @param conf the configuration
+ @param autoIncr whether to auto increment
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}.]]>
+ </doc>
+ </method>
+ <method name="getAutoIncrReducerProcCount" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the flag which, if set to true, causes
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} to be incremented
+ by the framework after invoking the reduce function. This value must be set to
+ false for applications which process records asynchronously
+ or buffer the input records, for example streaming applications.
+ In such cases applications should increment this counter on their own.
+ Default value is true.
+
+ @param conf the configuration
+ @return <code>true</code> if auto increment
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}.
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setAutoIncrReducerProcCount"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="autoIncr" type="boolean"/>
+ <doc>
+ <![CDATA[Set the flag which, if set to true, causes
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} to be incremented
+ by the framework after invoking the reduce function. This value must be set to
+ false for applications which process records asynchronously
+ or buffer the input records, for example streaming applications.
+ In such cases applications should increment this counter on their own.
+ Default value is true.
+
+ @param conf the configuration
+ @param autoIncr whether to auto increment
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}.]]>
+ </doc>
+ </method>
+ <method name="getSkipOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the directory to which skipped records are written. By default it is
+ a subdirectory of the output _logs directory.
+ The user can stop the writing of skipped records by setting the value to null.
+
+ @param conf the configuration.
+ @return the skip output directory. Null is returned if this is not set
+ and the output directory is also not set.]]>
+ </doc>
+ </method>
+ <method name="setSkipOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the directory to which skipped records are written. By default it is
+ a subdirectory of the output _logs directory.
+ The user can stop the writing of skipped records by setting the value to null.
+
+ @param conf the configuration.
+ @param path skip output directory path]]>
+ </doc>
+ </method>
+ <method name="getMapperMaxSkipRecords" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the number of acceptable skip records surrounding the bad record, PER
+ bad record, in the mapper. The number includes the bad record as well.
+ To turn off the detection/skipping of bad records, set the
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts are exhausted for this task.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not try
+ to narrow down the range; whatever records (depending on the application)
+ get skipped are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @return the number of acceptable skip records.]]>
+ </doc>
+ </method>
+ <method name="setMapperMaxSkipRecords"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="maxSkipRecs" type="long"/>
+ <doc>
+ <![CDATA[Set the number of acceptable skip records surrounding the bad record, PER
+ bad record, in the mapper. The number includes the bad record as well.
+ To turn off the detection/skipping of bad records, set the
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts are exhausted for this task.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not try
+ to narrow down the range; whatever records (depending on the application)
+ get skipped are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @param maxSkipRecs the number of acceptable skip records.]]>
+ </doc>
+ </method>
+ <method name="getReducerMaxSkipGroups" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the number of acceptable skip groups surrounding the bad group, PER
+ bad group, in the reducer. The number includes the bad group as well.
+ To turn off the detection/skipping of bad groups, set the
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts are exhausted for this task.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not try
+ to narrow down the range; whatever groups (depending on the application)
+ get skipped are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @return the number of acceptable skip groups.]]>
+ </doc>
+ </method>
+ <method name="setReducerMaxSkipGroups"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="maxSkipGrps" type="long"/>
+ <doc>
+ <![CDATA[Set the number of acceptable skip groups surrounding the bad group, PER
+ bad group, in the reducer. The number includes the bad group as well.
+ To turn off the detection/skipping of bad groups, set the
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts are exhausted for this task.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not try
+ to narrow down the range; whatever groups (depending on the application)
+ get skipped are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @param maxSkipGrps the number of acceptable skip groups.]]>
+ </doc>
+ </method>
+ <field name="COUNTER_GROUP" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Special counters which are written by the application and are
+ used by the framework for detecting bad records. For detecting bad records
+ these counters must be incremented by the application.]]>
+ </doc>
+ </field>
+ <field name="COUNTER_MAP_PROCESSED_RECORDS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of processed map records.
+ @see SkipBadRecords#getAutoIncrMapperProcCount(Configuration)]]>
+ </doc>
+ </field>
+ <field name="COUNTER_REDUCE_PROCESSED_GROUPS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of processed reduce groups.
+ @see SkipBadRecords#getAutoIncrReducerProcCount(Configuration)]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Utility class for the skip-bad-records functionality. It contains various
+ settings related to the skipping of bad records.
+
+ <p>Hadoop provides an optional mode of execution in which bad records
+ are detected and skipped in further attempts.
+
+ <p>This feature can be used when map/reduce tasks crash deterministically on
+ certain input. This happens due to bugs in the map/reduce function. The usual
+ course would be to fix these bugs. But sometimes this is not possible;
+ perhaps the bug is in third-party libraries for which the source code is
+ not available. In that case, the task never reaches completion even with
+ multiple attempts, and the complete data for that task is lost.</p>
+
+ <p>With this feature, only a small portion of data surrounding the
+ bad record is lost, which may be acceptable for some applications;
+ see {@link SkipBadRecords#setMapperMaxSkipRecords(Configuration, long)}.</p>
+
+ <p>The skipping mode gets kicked off after a certain number of failures;
+ see {@link SkipBadRecords#setAttemptsToStartSkipping(Configuration, int)}.</p>
+
+ <p>In the skipping mode, the map/reduce task maintains the record range which
+ is being processed at all times. Before giving the input to the
+ map/reduce function, it sends this record range to the TaskTracker.
+ If the task crashes, the TaskTracker knows which range was the last reported
+ one. On further attempts, that range gets skipped.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SkipBadRecords -->
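+ <!-- Usage sketch (illustrative; the numeric values and the "_skipped" path are
+ example choices):
+
+ SkipBadRecords.setAttemptsToStartSkipping(conf, 2); // kick off skip mode after 2 failed attempts
+ SkipBadRecords.setMapperMaxSkipRecords(conf, 100); // tolerate losing up to 100 records around a bad one
+ SkipBadRecords.setSkipOutputPath(conf, new Path("_skipped")); // conf is a JobConf here
+ -->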
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer -->
+ <class name="StatusHttpServer" extends="org.apache.hadoop.http.HttpServer"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A mapred http server.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer -->
+ <!-- start class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <class name="StatusHttpServer.TaskGraphServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusHttpServer.TaskGraphServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="width" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[width of the graph without margins]]>
+ </doc>
+ </field>
+ <field name="height" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[height of the graph without margins]]>
+ </doc>
+ </field>
+ <field name="ymargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on y axis]]>
+ </doc>
+ </field>
+ <field name="xmargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on x axis]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The servlet that outputs SVG graphics for map/reduce task
+ statuses.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskAttemptContext -->
+ <class name="TaskAttemptContext" extends="org.apache.hadoop.mapred.JobContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTaskAttemptID" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the taskAttemptID.
+
+ @return TaskAttemptID]]>
+ </doc>
+ </method>
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job Configuration.
+
+ @return JobConf]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskAttemptContext -->
+ <!-- start class org.apache.hadoop.mapred.TaskAttemptID -->
+ <class name="TaskAttemptID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskAttemptID" type="org.apache.hadoop.mapred.TaskID, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from given {@link TaskID}.
+ @param taskId TaskID that this task belongs to
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskAttemptID" type="java.lang.String, int, boolean, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param taskId taskId number
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link TaskID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskAttemptID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare TaskAttemptIDs first by their TaskIDs, then by attempt numbers.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a TaskAttemptID object from a given string.
+ @return the constructed TaskAttemptID object, or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <param name="attemptId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task attempt IDs. Arguments can
+ be given as null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>all task attempt IDs</i>
+ of <i>any jobtracker</i>, in <i>any job</i>, of the <i>first
+ map task</i>, we would use:
+ <pre>
+ TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+ </pre>
+ which will return:
+ <pre> "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @param attemptId the task attempt number, or null
+ @return a regex pattern matching TaskAttemptIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[TaskAttemptID represents the immutable and unique identifier for
+ a task attempt. Each task attempt is one particular instance of a Map or
+ Reduce Task identified by its TaskID.
+
+ TaskAttemptID consists of two parts: the first part is the
+ {@link TaskID} that this TaskAttemptID belongs to;
+ the second part is the task attempt number. <br>
+ An example TaskAttemptID is
+ <code>attempt_200707121733_0003_m_000005_0</code>, which represents the
+ zeroth task attempt for the fifth map task in the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskAttemptID strings,
+ but should instead use the appropriate constructors or the
+ {@link #forName(String)} method.
+
+ @see JobID
+ @see TaskID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskAttemptID -->
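+ <!-- Usage sketch (illustrative; the attempt string is the example from the
+ class documentation above):
+
+ TaskAttemptID id = TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
+ TaskID task = id.getTaskID();
+ // regex matching all attempts of the first map task of any job:
+ String pattern = TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+ -->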
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent -->
+ <class name="TaskCompletionEvent" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskCompletionEvent"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor for Writable.]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskCompletionEvent" type="int, org.apache.hadoop.mapred.TaskAttemptID, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor. eventId should be created externally and incremented
+ per event for each job.
+ @param eventId event id; event ids should be unique and assigned
+ incrementally, starting from 0.
+ @param taskId task id
+ @param status task's status
+ @param taskTrackerHttp task tracker's host:port for http.]]>
+ </doc>
+ </constructor>
+ <method name="getEventId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns event Id.
+ @return event id]]>
+ </doc>
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskAttemptId()} instead.">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id
+ @deprecated use {@link #getTaskAttemptId()} instead.]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptId" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the task attempt id.
+ @return task attempt id]]>
+ </doc>
+ </method>
+ <method name="getTaskStatus" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns enum Status.SUCCESS or Status.FAILURE.
+ @return the task's completion status]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerHttp" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the http location of the tasktracker where this task ran.
+ @return http location of the tasktracker's user logs]]>
+ </doc>
+ </method>
+ <method name="getTaskRunTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the time (in milliseconds) the task took to complete.]]>
+ </doc>
+ </method>
+ <method name="setTaskRunTime"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskCompletionTime" type="int"/>
+ <doc>
+ <![CDATA[Set the task completion time.
+ @param taskCompletionTime time (in milliseconds) the task took to complete]]>
+ </doc>
+ </method>
+ <method name="setEventId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="eventId" type="int"/>
+ <doc>
+ <![CDATA[Set the event Id. Should be assigned incrementally, starting from 0.
+ @param eventId the event id]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setTaskID(TaskAttemptID)} instead.">
+ <param name="taskId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId the task id
+ @deprecated use {@link #setTaskID(TaskAttemptID)} instead.]]>
+ </doc>
+ </method>
+ <method name="setTaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId the task attempt id]]>
+ </doc>
+ </method>
+ <method name="setTaskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"/>
+ <doc>
+ <![CDATA[Set task status.
+ @param status the task completion status]]>
+ </doc>
+ </method>
+ <method name="setTaskTrackerHttp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTrackerHttp" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set task tracker http location.
+ @param taskTrackerHttp the tasktracker's http location]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isMapTask" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="idWithinJob" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="EMPTY_ARRAY" type="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is used to track task completion events on
+ the job tracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent -->
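+ <!-- Usage sketch (editorial illustration; not generated from the Hadoop sources).
+ A Writable round-trip for TaskCompletionEvent using the write/readFields methods
+ documented above; the default constructor exists for exactly this purpose.
+
+   import java.io.ByteArrayInputStream;
+   import java.io.ByteArrayOutputStream;
+   import java.io.DataInputStream;
+   import java.io.DataOutputStream;
+   import java.io.IOException;
+   import org.apache.hadoop.mapred.TaskCompletionEvent;
+
+   public class CompletionEventIODemo {
+     public static void main(String[] args) throws IOException {
+       TaskCompletionEvent event = new TaskCompletionEvent();   // Writable default ctor
+       ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+       event.write(new DataOutputStream(bytes));                // serialize
+       TaskCompletionEvent copy = new TaskCompletionEvent();
+       copy.readFields(new DataInputStream(
+           new ByteArrayInputStream(bytes.toByteArray())));     // deserialize
+       System.out.println(copy.getEventId());
+     }
+   }
+ -->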
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <class name="TaskCompletionEvent.Status" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskCompletionEvent.Status&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <!-- start class org.apache.hadoop.mapred.TaskID -->
+ <class name="TaskID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskID" type="org.apache.hadoop.mapred.JobID, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from given {@link JobID}.
+ @param jobId JobID that this tip belongs to
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskID" type="java.lang.String, int, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from the given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this tip belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapred.ID"/>
+ <doc>
+ <![CDATA[Compare TaskIDs first by jobIds, then by tip numbers. Reduces are
+ defined as greater than maps.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a TaskID object from a given string
+ @return constructed TaskID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getTaskIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>the first map task</i>
+ of <i>any jobtracker</i>, of <i>any job</i>, we would use:
+ <pre>
+ TaskID.getTaskIDsPattern(null, null, true, 1);
+ </pre>
+ which will return:
+ <pre> "task_[^_]*_[0-9]*_m_000001*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @return a regex pattern matching TaskIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[TaskID represents the immutable and unique identifier for
+ a Map or Reduce Task. Each TaskID encompasses multiple attempts made to
+ execute the Map or Reduce Task, each of which is uniquely identified by
+ its TaskAttemptID.
+
+ TaskID consists of three parts. The first part is the {@link JobID} that this
+ TaskInProgress belongs to. The second part of the TaskID is either 'm' or 'r',
+ representing whether the task is a map task or a reduce task.
+ The third part is the task number. <br>
+ An example TaskID is:
+ <code>task_200707121733_0003_m_000005</code>, which represents the
+ fifth map task in the third job running at the jobtracker
+ started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskID strings,
+ but rather should use the appropriate constructors or the
+ {@link #forName(String)} method.
+
+ @see JobID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskID -->
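+ <!-- Usage sketch (editorial illustration; not generated from the Hadoop sources).
+ Demonstrates the compareTo ordering documented above (reduces sort after maps);
+ the task-id strings follow the format in the class comment.
+
+   import org.apache.hadoop.mapred.TaskID;
+
+   public class TaskIdOrderDemo {
+     public static void main(String[] args) {
+       TaskID map = TaskID.forName("task_200707121733_0003_m_000005");
+       TaskID reduce = TaskID.forName("task_200707121733_0003_r_000005");
+       // Same job, same tip number: the map id compares less than the reduce id.
+       System.out.println(map.compareTo(reduce) < 0);  // true
+     }
+   }
+ -->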
+ <!-- start class org.apache.hadoop.mapred.TaskLog -->
+ <class name="TaskLog" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLog"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskLogFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="getRealTaskLogFileLocation" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="getIndexFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ </method>
+ <method name="getIndexFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="isCleanup" type="boolean"/>
+ </method>
+ <method name="syncLogs"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="firstTaskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="syncLogs"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="firstTaskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="isCleanup" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logsRetainHours" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Purge old user logs.
+
+ @param logsRetainHours the number of hours for which user logs are retained
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskLogLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the desired maximum length of task's logs.
+ @param conf the job to look in
+ @return the number of bytes to cap the log files at]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ If the tailLength is 0, the entire output will be saved.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="setup" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ Setup commands, such as setting a memory limit, can be passed;
+ they will be executed before the exec.
+ If the tailLength is 0, the entire output will be saved.
+ @param setup The setup commands for the execed process.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="setup" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <param name="pidFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ Setup commands, such as setting a memory limit, can be passed;
+ they will be executed before the exec.
+ If the tailLength is 0, the entire output will be saved.
+ @param setup The setup commands for the execed process.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @param pidFileName The name of the pid-file
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="addCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="isExecutable" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add quotes to each of the command strings and
+ return them as a single string.
+ @param cmd The command to be quoted
+ @param isExecutable whether to resolve the first
+ argument to a shell path, treating it as an executable
+ @return the quoted string.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="captureDebugOut" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List&lt;java.lang.String&gt;"/>
+ <param name="debugoutFilename" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture the debug script's
+ stdout and stderr to the debug output file.
+ @param cmd The command and the arguments that should be run
+ @param debugoutFilename The filename that stdout and stderr
+ should be saved to.
+ @return the modified command that should be run
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple logger to handle the task-specific user logs.
+ This class uses the system property <code>hadoop.log.dir</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog -->
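+ <!-- Usage sketch (editorial illustration; not generated from the Hadoop sources).
+ Wrapping a command with TaskLog.captureOutAndError as documented above. The
+ command and log file names are assumptions for the example.
+
+   import java.io.File;
+   import java.util.Arrays;
+   import java.util.List;
+   import org.apache.hadoop.mapred.TaskLog;
+
+   public class CaptureOutAndErrorDemo {
+     public static void main(String[] args) throws Exception {
+       List<String> cmd = Arrays.asList("echo", "hello");
+       // tailLength 0 saves the entire output, per the javadoc above.
+       List<String> wrapped = TaskLog.captureOutAndError(
+           cmd, new File("stdout.log"), new File("stderr.log"), 0L);
+       new ProcessBuilder(wrapped).start().waitFor();
+     }
+   }
+ -->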
+ <!-- start class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <class name="TaskLog.LogName" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.TaskLog.LogName&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskLog.LogName[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskLog.LogName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[The filter for userlogs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <!-- start class org.apache.hadoop.mapred.TaskLogAppender -->
+ <class name="TaskLogAppender" extends="org.apache.log4j.FileAppender"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogAppender"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="activateOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Getter/Setter methods for log4j.]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ </method>
+ <method name="getTotalLogFileSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setTotalLogFileSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logSize" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[A simple log4j-appender for the task child's
+ map-reduce system logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogAppender -->
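+ <!-- Usage sketch (editorial illustration; not generated from the Hadoop sources).
+ Programmatic setup of the appender using only the setters documented above.
+ The task id is made up, and the unit of setTotalLogFileSize is an assumption
+ (it is not stated in this file).
+
+   import org.apache.hadoop.mapred.TaskLogAppender;
+
+   public class TaskLogAppenderDemo {
+     public static void main(String[] args) {
+       TaskLogAppender appender = new TaskLogAppender();
+       appender.setTaskId("attempt_200707121733_0003_m_000005_0");
+       appender.setTotalLogFileSize(4096L);  // cap on log size; unit assumed
+       appender.activateOptions();           // standard log4j lifecycle hook
+     }
+   }
+ -->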
+ <!-- start class org.apache.hadoop.mapred.TaskLogServlet -->
+ <class name="TaskLogServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskLogUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTrackerHostName" type="java.lang.String"/>
+ <param name="httpPort" type="java.lang.String"/>
+ <param name="taskAttemptID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Construct the taskLogUrl.
+ @param taskTrackerHostName the tasktracker's host name
+ @param httpPort the tasktracker's http port
+ @param taskAttemptID the task attempt id
+ @return the taskLogUrl]]>
+ </doc>
+ </method>
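+ <!-- Usage sketch (editorial illustration; not generated from the Hadoop sources).
+ Building a task-log URL with the method above. Host, port, and attempt id are
+ assumptions; 50060 is only the conventional TaskTracker http port.
+
+   import org.apache.hadoop.mapred.TaskLogServlet;
+
+   public class TaskLogUrlDemo {
+     public static void main(String[] args) {
+       String url = TaskLogServlet.getTaskLogUrl(
+           "tracker-host.example.com", "50060",
+           "attempt_200707121733_0003_m_000005_0");
+       System.out.println(url);
+     }
+   }
+ -->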
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the logs via http.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A servlet that is run by the TaskTrackers to provide the task logs via http.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskReport -->
+ <class name="TaskReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskID()} instead">
+ <doc>
+ <![CDATA[@deprecated use {@link #getTaskID()} instead]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The id of the task.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The amount completed, between zero and one.]]>
+ </doc>
+ </method>
+ <method name="getState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The most recent state, reported by a {@link Reporter}.]]>
+ </doc>
+ </method>
+ <method name="getDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A list of error messages.]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A table of counters.]]>
+ </doc>
+ </method>
+ <method name="getFinishTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the finish time of the task.
+ @return 0 if the finish time was not set, else the finish time.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start time of the task.
+ @return 0 if the start time was not set, else the start time.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A report on the state of a task.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskReport -->
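+ <!-- Usage sketch (editorial illustration; not generated from the Hadoop sources).
+ Printing the fields documented above for an array of reports. How the array is
+ obtained (e.g. via JobClient.getMapTaskReports) is outside this file and assumed.
+
+   import org.apache.hadoop.mapred.TaskReport;
+
+   public class TaskReportPrinter {
+     static void print(TaskReport[] reports) {
+       for (TaskReport r : reports) {
+         // Progress is between zero and one, per getProgress above.
+         System.out.println(r.getTaskID() + " " + r.getProgress() + " " + r.getState());
+         for (String diag : r.getDiagnostics()) {
+           System.out.println("  " + diag);
+         }
+       }
+     }
+   }
+ -->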
+ <!-- start class org.apache.hadoop.mapred.TaskTracker -->
+ <class name="TaskTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.TaskUmbilicalProtocol"/>
+ <implements name="java.lang.Runnable"/>
+ <constructor name="TaskTracker" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start with the local machine name and the default JobTracker.]]>
+ </doc>
+ </constructor>
+ <method name="getTaskTrackerInstrumentation" return="org.apache.hadoop.mapred.TaskTrackerInstrumentation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getInstrumentationClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.TaskTrackerInstrumentation&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setInstrumentationClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="t" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.TaskTrackerInstrumentation&gt;"/>
+ </method>
+ <method name="cleanupStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Removes all contents of temporary storage. Called upon
+ startup to remove any leftovers from the previous run.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close down the TaskTracker and all its components. We must also shutdown
+ any running tasks or threads, and cleanup disk space. A new TaskTracker
+ within the same process space might be restarted, so everything must be
+ clean.]]>
+ </doc>
+ </method>
+ <method name="getJobClient" return="org.apache.hadoop.mapred.InterTrackerProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The connection to the JobTracker, used by the TaskRunner
+ for locating remote files.]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerReportAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the address to which the tasktracker is bound.]]>
+ </doc>
+ </method>
+ <method name="getJvmManagerInstance" return="org.apache.hadoop.mapred.JvmManager"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The server retry loop.
+ This while-loop attempts to connect to the JobTracker. It only
+ loops when the old TaskTracker has gone bad (its state is
+ stale somehow) and we need to reinitialize everything.]]>
+ </doc>
+ </method>
+ <method name="getTask" return="org.apache.hadoop.mapred.JvmTask"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jvmId" type="org.apache.hadoop.mapred.JVMId"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called upon startup by the child process, to fetch Task data.]]>
+ </doc>
+ </method>
+ <method name="statusUpdate" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskStatus" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called periodically to report Task progress, from 0.0 to 1.0.]]>
+ </doc>
+ </method>
+ <method name="reportDiagnosticInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="info" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when the task dies before completion, and we want to report back
+ diagnostic info.]]>
+ </doc>
+ </method>
+ <method name="reportNextRecordRange"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="range" type="org.apache.hadoop.mapred.SortedRanges.Range"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ping" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Child checking to see if we're alive. Normally does nothing.]]>
+ </doc>
+ </method>
+ <method name="commitPending"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskStatus" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The task is reporting that it is in the commit_pending state
+ and is waiting for the commit response.]]>
+ </doc>
+ </method>
+ <method name="canCommit" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Child checking whether it can commit]]>
+ </doc>
+ </method>
+ <method name="done"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The task is done.]]>
+ </doc>
+ </method>
+ <method name="shuffleError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A reduce-task failed to shuffle the map-outputs. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="fsError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A child task had a local filesystem error. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="getMapCompletionEvents" return="org.apache.hadoop.mapred.MapTaskCompletionEventsUpdate"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxLocs" type="int"/>
+ <param name="id" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mapOutputLost"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="errorMsg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A completed map task's output has been lost.]]>
+ </doc>
+ </method>
+ <method name="isIdle" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this task tracker idle?
+ @return has this task tracker finished and cleaned up all of its tasks?]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Start the TaskTracker, pointing toward the indicated JobTracker.]]>
+ </doc>
+ </method>
+ <method name="isTaskMemoryManagerEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the TaskMemoryManager enabled on this system?
+ @return true if enabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getTaskMemoryManager" return="org.apache.hadoop.mapred.TaskMemoryManagerThread"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MR_CLIENTTRACE_FORMAT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ClientTraceLog" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[TaskTracker is a process that starts and tracks MR Tasks
+ in a networked environment. It contacts the JobTracker
+ for Task assignments and to report results.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker -->
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <class name="TaskTracker.MapOutputServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskTracker.MapOutputServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in TaskTracker's Jetty to serve the map outputs
+ to other nodes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <!-- start class org.apache.hadoop.mapred.TextInputFormat -->
+ <class name="TextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="TextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage return is used to signal end of line. Keys are
+ the position in the file, and values are the line of text.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat -->
+ <class name="TextOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes plain text files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat -->
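+ <!-- Usage sketch (editorial illustration; not generated from the Hadoop sources).
+ Wiring the two text formats into a JobConf. The helper calls
+ (FileInputFormat.setInputPaths, FileOutputFormat.setOutputPath, JobClient.runJob)
+ and the in/out paths are assumptions; only the formats are documented here.
+
+   import org.apache.hadoop.fs.Path;
+   import org.apache.hadoop.mapred.FileInputFormat;
+   import org.apache.hadoop.mapred.FileOutputFormat;
+   import org.apache.hadoop.mapred.JobClient;
+   import org.apache.hadoop.mapred.JobConf;
+   import org.apache.hadoop.mapred.TextInputFormat;
+   import org.apache.hadoop.mapred.TextOutputFormat;
+
+   public class TextFormatDemo {
+     public static void main(String[] args) throws Exception {
+       JobConf conf = new JobConf(TextFormatDemo.class);
+       conf.setInputFormat(TextInputFormat.class);    // keys: offsets, values: lines
+       conf.setOutputFormat(TextOutputFormat.class);  // plain text output
+       FileInputFormat.setInputPaths(conf, new Path("in"));
+       FileOutputFormat.setOutputPath(conf, new Path("out"));
+       JobClient.runJob(conf);
+     }
+   }
+ -->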
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+ <class name="TextOutputFormat.LineRecordWriter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"/>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="out" type="java.io.DataOutputStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+</package>
+<package name="org.apache.hadoop.mapred.jobcontrol">
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.Job -->
+ <class name="Job" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf, java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+ @param jobConf a mapred job configuration representing a job to be executed.
+ @param dependingJobs a list of jobs the current job depends on]]>
+ </doc>
+ </constructor>
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+
+ @param jobConf mapred job configuration representing a job to be executed.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job name of this job]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job name for this job.
+ @param jobName the job name]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job ID of this job assigned by JobControl]]>
+ </doc>
+ </method>
+ <method name="setJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job ID for this job.
+ @param id the job ID]]>
+ </doc>
+ </method>
+ <method name="getMapredJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getAssignedJobID()} instead">
+ <doc>
+ <![CDATA[@return the mapred ID of this job
+ @deprecated use {@link #getAssignedJobID()} instead]]>
+ </doc>
+ </method>
+ <method name="setMapredJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setAssignedJobID(JobID)} instead">
+ <param name="mapredJobID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job.
+ @param mapredJobID the mapred job ID for this job.
+ @deprecated use {@link #setAssignedJobID(JobID)} instead]]>
+ </doc>
+ </method>
+ <method name="getAssignedJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred ID of this job as assigned by the
+ mapred framework.]]>
+ </doc>
+ </method>
+ <method name="setAssignedJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mapredJobID" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job as assigned by the
+ mapred framework.
+ @param mapredJobID the mapred job ID for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred job conf of this job]]>
+ </doc>
+ </method>
+ <method name="setJobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Set the mapred job conf for this job.
+ @param jobConf the mapred job conf for this job.]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the state of this job]]>
+ </doc>
+ </method>
+ <method name="setState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Set the state for this job.
+ @param state the new state for this job.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the message of this job]]>
+ </doc>
+ </method>
+ <method name="setMessage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="message" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the message for this job.
+ @param message the message for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobClient" return="org.apache.hadoop.mapred.JobClient"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job client of this job]]>
+ </doc>
+ </method>
+ <method name="getDependingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the depending jobs of this job]]>
+ </doc>
+ </method>
+ <method name="addDependingJob" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dependingJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a job to this job's dependency list. Dependent jobs can only be added while a Job
+ is waiting to run, not during or afterwards.
+
+ @param dependingJob Job that this Job depends on.
+ @return <tt>true</tt> if the Job was added.]]>
+ </doc>
+ </method>
+ <method name="isCompleted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in a complete state]]>
+ </doc>
+ </method>
+ <method name="isReady" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in READY state]]>
+ </doc>
+ </method>
+ <method name="submit"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Submit this job to mapred. The state becomes RUNNING if submission
+ is successful, FAILED otherwise.]]>
+ </doc>
+ </method>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WAITING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEPENDENT_FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class encapsulates a MapReduce job and its dependencies. It monitors
+ the states of the depending jobs and updates the state of this job.
+ A job starts in the WAITING state. If it does not have any depending jobs, or
+ all of the depending jobs are in the SUCCESS state, then the job state becomes
+ READY. If any depending job fails, the job will fail too.
+ When in the READY state, the job can be submitted to Hadoop for execution, with
+ the state changing to RUNNING. From the RUNNING state, the job can reach the
+ SUCCESS or FAILED state, depending on the status of the job execution.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.Job -->
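+ <!-- Usage sketch (editorial illustration; not generated from the Hadoop sources).
+ Expressing the dependency rule described above: a job leaves WAITING for READY
+ only once its depending jobs succeed. The bare JobConfs are placeholders; real
+ jobs would be fully configured.
+
+   import org.apache.hadoop.mapred.JobConf;
+   import org.apache.hadoop.mapred.jobcontrol.Job;
+
+   public class JobDependencyDemo {
+     public static void main(String[] args) throws Exception {
+       Job first = new Job(new JobConf());   // starts in the WAITING state
+       Job second = new Job(new JobConf());
+       // second becomes READY only after first reaches SUCCESS.
+       second.addDependingJob(first);
+     }
+   }
+ -->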
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.JobControl -->
+ <class name="JobControl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobControl" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a job control for a group of jobs.
+ @param groupName a name identifying this group]]>
+ </doc>
+ </constructor>
+ <method name="getWaitingJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the waiting state]]>
+ </doc>
+ </method>
+ <method name="getRunningJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the running state]]>
+ </doc>
+ </method>
+ <method name="getReadyJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the ready state]]>
+ </doc>
+ </method>
+ <method name="getSuccessfulJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the success state]]>
+ </doc>
+ </method>
+ <method name="getFailedJobs" return="java.util.ArrayList&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="addJob" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="aJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a new job.
+ @param aJob the new job]]>
+ </doc>
+ </method>
+ <method name="addJobs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobs" type="java.util.Collection&lt;org.apache.hadoop.mapred.jobcontrol.Job&gt;"/>
+ <doc>
+ <![CDATA[Add a collection of jobs.
+ 
+ @param jobs the collection of jobs to add]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the thread state]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set the thread state to STOPPING so that the
+ thread will stop when it wakes up.]]>
+ </doc>
+ </method>
+ <method name="suspend"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Suspend the running thread.]]>
+ </doc>
+ </method>
+ <method name="resume"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resume the suspended thread.]]>
+ </doc>
+ </method>
+ <method name="allFinished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The main loop for the thread.
+ The loop does the following:
+   - Check the states of the running jobs
+   - Update the states of the waiting jobs
+   - Submit the jobs in READY state]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a set of MapReduce jobs and their dependencies. It tracks
+ the states of the jobs by placing them into different tables according to their
+ states.
+ 
+ This class provides APIs for the client app to add a job to the group and to get
+ the jobs in the group in different states. When a
+ job is added, an ID unique to the group is assigned to the job.
+ 
+ This class has a thread that submits jobs when they become ready, monitors the
+ states of the running jobs, and updates the states of jobs based on the state
+ changes of their depending jobs. The class provides APIs for suspending/resuming
+ the thread, and for stopping it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.JobControl -->
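+ <!-- A minimal Java sketch of driving a JobControl group from its own thread,
+      as the class doc above suggests; the group name and polling interval are
+      arbitrary choices, not mandated by the API:
+
+      import org.apache.hadoop.mapred.jobcontrol.Job;
+      import org.apache.hadoop.mapred.jobcontrol.JobControl;
+
+      public class GroupRunnerSketch {
+        public static void runGroup(Job... jobs) throws InterruptedException {
+          JobControl control = new JobControl("sketch-group");
+          for (Job j : jobs) {
+            control.addJob(j);  // assigns the job an ID unique to the group
+          }
+          // JobControl implements Runnable: its run() loop checks running jobs,
+          // updates waiting jobs, and submits the jobs in READY state.
+          Thread driver = new Thread(control);
+          driver.start();
+          while (!control.allFinished()) {
+            Thread.sleep(500);
+          }
+          control.stop();  // sets the thread state to STOPPING
+        }
+      }
+ -->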
+</package>
+<package name="org.apache.hadoop.mapred.join">
+ <!-- start class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+ <class name="ArrayListBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="ArrayListBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayListBackedIterator" type="java.util.ArrayList&lt;X&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. The
+ implementation uses an {@link java.util.ArrayList} to store elements
+ added to it, replaying them as requested.
+ Prefer {@link StreamBackedIterator}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <interface name="ComposableInputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Refinement of InputFormat requiring implementors to provide
+ ComposableRecordReader instead of RecordReader.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <interface name="ComposableRecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"/>
+ <implements name="java.lang.Comparable&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key this RecordReader would supply on a call to next(K,V)]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RecordReader into the object provided.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the stream is not empty, but provides no guarantee that
+ a call to next(K,V) will succeed.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[While key-value pairs from this RecordReader match the given key, register
+ them with the JoinCollector provided.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Additional operations required of a RecordReader to participate in a join.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputFormat -->
+ <class name="CompositeInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="CompositeInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Interpret a given string as a composite expression.
+ {@code
+ func ::= <ident>([<func>,]*<func>)
+ func ::= tbl(<class>,"<path>")
+ class ::= @see java.lang.Class#forName(java.lang.String)
+ path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
+ }
+ Reads the expression from the <tt>mapred.join.expr</tt> property and
+ user-supplied join types from <tt>mapred.join.define.&lt;ident&gt;</tt>
+ properties. Paths supplied to <tt>tbl</tt> are given as input paths to the
+ InputFormat class listed.
+ @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]>
+ </doc>
+ </method>
+ <method name="addDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds the default set of identifiers to the parser.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a CompositeInputSplit from the child InputFormats by assigning the
+ ith split from each child to the ith composite split.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a CompositeRecordReader for the children of this InputFormat
+ as defined in the init expression.
+ The outermost join need only be composable, not necessarily a composite.
+ Mandating TupleWritable isn't strictly correct.]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given InputFormat class (inf), path (p) return:
+ {@code tbl(<inf>, <p>) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), InputFormat class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="path" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), InputFormat class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat capable of performing joins over a set of data sources sorted
+ and partitioned the same way.
+ @see #setFormat
+
+ A user may define new join types by setting the property
+ <tt>mapred.join.define.&lt;ident&gt;</tt> to a classname. In the expression
+ <tt>mapred.join.expr</tt>, the identifier will be assumed to be a
+ ComposableRecordReader.
+ <tt>mapred.join.keycomparator</tt> can be a classname used to compare keys
+ in the join.
+ @see JoinRecordReader
+ @see MultiFilterRecordReader]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputFormat -->
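+ <!-- A Java sketch of configuring the map-side join described above. It assumes
+      "inner" is among the default identifiers added by addDefaults, and that
+      both inputs are SequenceFiles sorted and partitioned identically:
+
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.SequenceFileInputFormat;
+      import org.apache.hadoop.mapred.join.CompositeInputFormat;
+
+      public class JoinConfSketch {
+        public static void configure(JobConf job, Path a, Path b) {
+          job.setInputFormat(CompositeInputFormat.class);
+          // compose() builds the expression that setFormat() later reads back
+          // from the mapred.join.expr property.
+          job.set("mapred.join.expr", CompositeInputFormat.compose(
+              "inner", SequenceFileInputFormat.class, a, b));
+        }
+      }
+ -->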
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputSplit -->
+ <class name="CompositeInputSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="CompositeInputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CompositeInputSplit" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.mapred.InputSplit"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an InputSplit to this collection.
+ @throws IOException If capacity was not specified during construction
+ or if capacity has been reached.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get the ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the aggregate length of all child InputSplits currently added.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the length of the ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Collect a set of hosts from all child InputSplits.]]>
+ </doc>
+ </method>
+ <method name="getLocation" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locations of the ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write splits in the following format.
+ {@code
+ <count><class1><class2>...<classn><split1><split2>...<splitn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}
+ @throws IOException If the child InputSplit cannot be read, typically
+ for failing access checks.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This InputSplit contains a set of child InputSplits. Any InputSplit inserted
+ into this collection must have a public default constructor.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputSplit -->
+ <!-- start class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <class name="CompositeRecordReader" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="CompositeRecordReader" type="int, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a RecordReader with <tt>capacity</tt> children to position
+ <tt>id</tt> in the parent reader.
+ The id of a root CompositeRecordReader is -1 by convention, but relying
+ on this is not recommended.]]>
+ </doc>
+ </constructor>
+ <method name="combine" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ </method>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordReaderQueue" return="java.util.PriorityQueue&lt;org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return sorted list of RecordReaders for this composite.]]>
+ </doc>
+ </method>
+ <method name="getComparator" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return comparator defining the ordering for RecordReaders in this
+ composite.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rr" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ? extends V&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a RecordReader to this collection.
+ The id() of a RecordReader determines where in the Tuple its
+ entry will appear. Adding RecordReaders with the same id has
+ undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key for the current join or the value at the top of the
+ RecordReader heap.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the top of this RR into the given object.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if it is possible that this could emit more values.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Pass skip key to child RRs.]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Obtain an iterator over the child RRs apropos of the value type
+ ultimately emitted from this join.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[If the key provided matches that of this Composite, give the
+ JoinCollector an iterator over the values it may emit.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For all child RRs offering the key provided, obtain an iterator
+ at that position in the JoinCollector.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key of join or head of heap
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new key value common to all child RRs.
+ @throws ClassCastException if key classes differ.]]>
+ </doc>
+ </method>
+ <method name="createInternalValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a value to be used internally for joins.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unsupported (returns zero in all cases).]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all child RRs.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report progress as the minimum of all child RR progress.]]>
+ </doc>
+ </method>
+ <field name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, X&gt;.JoinCollector"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="kids" type="org.apache.hadoop.mapred.join.ComposableRecordReader[]"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A RecordReader that can effect joins of RecordReaders sharing a common key
+ type and partitioning.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <class name="InnerJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Return true iff the tuple is full (all data sources contain this key).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full inner join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <class name="JoinRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, org.apache.hadoop.io.Writable, org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Emit the next set of key-value pairs as defined by the child
+ RecordReaders and operation associated with this composite RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator wrapping the JoinCollector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite joins returning Tuples of arbitrary Writables.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <class name="JoinRecordReader.JoinDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
+ <constructor name="JoinRecordReader.JoinDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Since the JoinCollector is effecting our operation, we need only
+ provide an iterator proxy wrapping its operation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+ <class name="MultiFilterRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"/>
+ <constructor name="MultiFilterRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For each tuple emitted, return a value (typically one of the values
+ in the tuple).
+ Modifying the Writables in the tuple is permitted and unlikely to affect
+ join behavior in most cases, but it is not recommended. It's safer to
+ clone first.]]>
+ </doc>
+ </method>
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Default implementation offers {@link #emit} every Tuple from the
+ collector (the outer join of child RRs).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createValue" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator returning a single value from the tuple.
+ @see MultiFilterDelegationIterator]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite joins returning values derived from multiple
+ sources, but generally not tuples.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
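+ <!-- A hypothetical Java sketch of the emit() extension point described above:
+      a filter that returns the leftmost value present in each tuple. The class
+      name and policy are invented for illustration, not part of the library:
+
+      import java.io.IOException;
+      import org.apache.hadoop.io.Writable;
+      import org.apache.hadoop.io.WritableComparable;
+      import org.apache.hadoop.io.WritableComparator;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.join.MultiFilterRecordReader;
+      import org.apache.hadoop.mapred.join.TupleWritable;
+
+      public class FirstValueReader<K extends WritableComparable, V extends Writable>
+          extends MultiFilterRecordReader<K, V> {
+        public FirstValueReader(int id, JobConf conf, int capacity,
+            Class<? extends WritableComparator> cmpcl) throws IOException {
+          super(id, conf, capacity, cmpcl);
+        }
+        @SuppressWarnings("unchecked")
+        protected V emit(TupleWritable dst) throws IOException {
+          for (int i = 0; i < dst.size(); ++i) {
+            if (dst.has(i)) {
+              return (V) dst.get(i);  // first populated position wins
+            }
+          }
+          throw new IOException("unexpected empty tuple");
+        }
+      }
+ -->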
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <class name="MultiFilterRecordReader.MultiFilterDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"/>
+ <constructor name="MultiFilterRecordReader.MultiFilterDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="V extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy the JoinCollector, but include a callback to emit.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <class name="OuterJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit everything from the collector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full outer join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.OverrideRecordReader -->
+ <class name="OverrideRecordReader" extends="org.apache.hadoop.mapred.join.MultiFilterRecordReader&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="emit" return="V extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit the value with the highest position in the tuple.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instead of filling the JoinCollector with iterators from all
+ data sources, fill only the rightmost for this key.
+ This not only saves space by discarding the other sources, but
+ it also emits only the key-value pairs of the preferred
+ RecordReader instead of repeating that stream n times, where
+ n is the cardinality of the cross product of the discarded
+ streams for the given key.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Prefer the &quot;rightmost&quot; data source for this key.
+ For example, <tt>override(S1,S2,S3)</tt> will prefer values
+ from S3 over S2, and values from S2 over S1 for all keys
+ emitted from all sources.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OverrideRecordReader -->
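+ <!-- A Java sketch of the override semantics above, using the compose() helper
+      from CompositeInputFormat; the paths and method name are placeholders:
+
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.SequenceFileInputFormat;
+      import org.apache.hadoop.mapred.join.CompositeInputFormat;
+
+      public class OverrideSketch {
+        public static void preferRightmost(JobConf job, Path s1, Path s2, Path s3) {
+          // For each key, values from s3 beat s2, and values from s2 beat s1.
+          job.setInputFormat(CompositeInputFormat.class);
+          job.set("mapred.join.expr", CompositeInputFormat.compose(
+              "override", SequenceFileInputFormat.class, s1, s2, s3));
+        }
+      }
+ -->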
+ <!-- start class org.apache.hadoop.mapred.join.Parser -->
+ <class name="Parser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Very simple shift-reduce parser for join expressions.
+
+ This should be sufficient for the user extension permitted now, but ought to
+ be replaced with a parser generator if more complex grammars are supported.
+ In particular, this &quot;shift-reduce&quot; parser has no states. Each set
+ of formals requires a different internal node type, which is responsible for
+ interpreting the list of tokens it receives. This is sufficient for the
+ current grammar, but it has several annoying properties that might inhibit
+ extension. In particular, parentheses always denote function calls; an
+ algebraic or filter grammar would not only require a node type, but must
+ also work around the internals of this parser.
+
+ For most other cases, adding classes to the hierarchy (particularly by
+ extending JoinRecordReader and MultiFilterRecordReader) is fairly
+ straightforward. One need only override the relevant method(s) (usually only
+ {@link CompositeRecordReader#combine}) and include a property to map its
+ value to an identifier in the parser.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser -->
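+ <!-- A hypothetical Java sketch of the extension route described above:
+      subclass JoinRecordReader, override combine(), and map the class to an
+      identifier via mapred.join.define.<ident>. The "atleast2" policy and
+      class name are invented for illustration:
+
+      import java.io.IOException;
+      import org.apache.hadoop.io.WritableComparable;
+      import org.apache.hadoop.io.WritableComparator;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.join.JoinRecordReader;
+      import org.apache.hadoop.mapred.join.TupleWritable;
+
+      public class AtLeastTwoJoinReader<K extends WritableComparable>
+          extends JoinRecordReader<K> {
+        public AtLeastTwoJoinReader(int id, JobConf conf, int capacity,
+            Class<? extends WritableComparator> cmpcl) throws IOException {
+          super(id, conf, capacity, cmpcl);
+        }
+        protected boolean combine(Object[] srcs, TupleWritable dst) {
+          int present = 0;
+          for (int i = 0; i < dst.size(); ++i) {
+            if (dst.has(i)) { ++present; }
+          }
+          return present >= 2;  // keep tuples matched by two or more sources
+        }
+      }
+
+      Registration in the driver, before setting mapred.join.expr:
+
+      job.set("mapred.join.define.atleast2", AtLeastTwoJoinReader.class.getName());
+ -->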
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Node -->
+ <class name="Parser.Node" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat"/>
+ <constructor name="Parser.Node" type="java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addIdentifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="ident" type="java.lang.String"/>
+ <param name="mcstrSig" type="java.lang.Class[]"/>
+ <param name="nodetype" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.Parser.Node&gt;"/>
+ <param name="cl" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;"/>
+ <exception name="NoSuchMethodException" type="java.lang.NoSuchMethodException"/>
+ <doc>
+ <![CDATA[For a given identifier, add a mapping to the nodetype for the parse
+ tree and to the ComposableRecordReader to be created, including the
+ formals required to invoke the constructor.
+ The nodetype and constructor signature should be filled in from the
+ child node.]]>
+ </doc>
+ </method>
+ <method name="setID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="int"/>
+ </method>
+ <method name="setKeyComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"/>
+ </method>
+ <field name="rrCstrMap" type="java.util.Map&lt;java.lang.String, java.lang.reflect.Constructor&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;&gt;"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ident" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Node -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <class name="Parser.NodeToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <class name="Parser.NumToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.NumToken" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <class name="Parser.StrToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.StrToken" type="org.apache.hadoop.mapred.join.Parser.TType, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Token -->
+ <class name="Parser.Token" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getType" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Tagged-union type for tokens from the join expression.
+ @see Parser.TType]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Token -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.TType -->
+ <class name="Parser.TType" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.join.Parser.TType&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.join.Parser.TType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.TType -->
+ <!-- start interface org.apache.hadoop.mapred.join.ResetableIterator -->
+ <interface name="ResetableIterator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[True if a call to next may return a value. False positives are
+ permitted, but false negatives are not.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign the next value to the object provided.
+ It is required that elements added to a ResetableIterator be returned in
+ the same order after a call to {@link #reset} (FIFO).
+
+ Note that a call to this may fail for nested joins (i.e. more elements
+ available, but none satisfying the constraints of the join)]]>
+ </doc>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Assign the last value returned to the object provided.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set iterator to return to the start of its range. Must be called after
+ calling {@link #add} to avoid a ConcurrentModificationException.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="T extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an element to the collection of elements to iterate over.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close datasources and release resources. Calling methods on the iterator
+ after calling close has undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Close datasources, but do not release internal resources. Calling this
+ method should permit the object to be reused with a different datasource.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This defines an interface to a stateful Iterator that can replay elements
+ added to it directly.
+ Note that this does not extend {@link java.util.Iterator}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ResetableIterator -->
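+ <!-- A minimal Java sketch of the iterator contract above, using the
+      ArrayListBackedIterator implementation from this package; the Text values
+      are placeholders:
+
+      import java.io.IOException;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.mapred.join.ArrayListBackedIterator;
+
+      public class ReplaySketch {
+        public static void demo() throws IOException {
+          ArrayListBackedIterator<Text> it = new ArrayListBackedIterator<Text>();
+          it.add(new Text("a"));
+          it.add(new Text("b"));
+          it.reset();                  // required after add() before iterating
+          Text val = new Text();
+          while (it.hasNext() && it.next(val)) {
+            System.out.println(val);   // FIFO: "a" then "b"
+          }
+          it.replay(val);              // re-assigns the last value returned
+          it.close();
+        }
+      }
+ -->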
+ <!-- start class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <class name="ResetableIterator.EMPTY" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;U&gt;"/>
+ <constructor name="ResetableIterator.EMPTY"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <!-- start class org.apache.hadoop.mapred.join.StreamBackedIterator -->
+ <class name="StreamBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
+ <constructor name="StreamBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="X extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. This
+ implementation uses a byte array to store elements added to it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.StreamBackedIterator -->
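+  <!-- Usage sketch (not part of the generated API record): a minimal,
+  hypothetical walk through the ResetableIterator contract above, using
+  StreamBackedIterator with Text values; names other than the API's own
+  are made up.
+
+  import java.io.IOException;
+  import org.apache.hadoop.io.Text;
+  import org.apache.hadoop.mapred.join.StreamBackedIterator;
+
+  public class ResetableIteratorSketch {
+    public static void main(String[] args) throws IOException {
+      StreamBackedIterator<Text> it = new StreamBackedIterator<Text>();
+      it.add(new Text("a"));      // buffer elements to iterate over
+      it.add(new Text("b"));
+      it.reset();                 // rewind; required after add()
+      Text val = new Text();
+      while (it.next(val)) {      // next() fills val, returns false at end
+        System.out.println(val);
+      }
+      it.reset();                 // the same range can be iterated again
+      while (it.next(val)) {
+        System.out.println(val);
+      }
+      it.close();                 // release backing resources
+    }
+  }
+  -->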
+ <!-- start class org.apache.hadoop.mapred.join.TupleWritable -->
+ <class name="TupleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable&lt;org.apache.hadoop.io.Writable&gt;"/>
+ <constructor name="TupleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty tuple with no allocated storage for writables.]]>
+ </doc>
+ </constructor>
+ <constructor name="TupleWritable" type="org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Initialize tuple with storage; unknown whether any of them contain
+ &quot;written&quot; values.]]>
+ </doc>
+ </constructor>
+ <method name="has" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Return true if tuple has an element at the position provided.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get ith Writable from Tuple.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of children in this Tuple.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator over the elements in this tuple.
+ Note that this doesn't flatten the tuple; one may receive tuples
+ from this iterator.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert Tuple to String as in the following.
+ <tt>[<child1>,<child2>,...,<childn>]</tt>]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes each Writable to <code>out</code>.
+ TupleWritable format:
+ {@code
+ <count><type1><type2>...<typen><obj1><obj2>...<objn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.
+
+ This is *not* a general-purpose tuple type. In almost all cases, users are
+ encouraged to implement their own serializable types, which can perform
+ better validation and provide more efficient encodings than this class is
+ capable of. TupleWritable relies on the join framework for type safety and
+ assumes its instances will rarely be persisted, assumptions that are not
+ only incompatible with, but contrary to, the general case.
+
+ @see org.apache.hadoop.io.Writable]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.TupleWritable -->
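+  <!-- Usage sketch (not part of the generated API record): a hedged example
+  of consuming a TupleWritable produced by the join framework; the dump()
+  helper is hypothetical.
+
+  import org.apache.hadoop.io.Writable;
+  import org.apache.hadoop.mapred.join.TupleWritable;
+
+  public class TupleWritableSketch {
+    /** Print every written position of a joined tuple. */
+    public static void dump(TupleWritable tuple) {
+      for (int i = 0; i < tuple.size(); i++) {
+        if (tuple.has(i)) {            // positions may be unwritten
+          Writable w = tuple.get(i);   // may itself be a nested tuple
+          System.out.println(i + " => " + w);
+        }
+      }
+    }
+  }
+  -->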
+ <!-- start class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+ <class name="WrappedRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, U&gt;"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="key" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key at the head of this RR.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qkey" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RR into the object supplied.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return true if the RR (including the k,v pair stored in this object)
+ is not yet exhausted.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read the next k,v pair into the head of this object; return true iff
+ a pair was read, i.e. iff the RR is not yet exhausted.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an iterator to the collector at the position occupied by this
+ RecordReader over the values in this stream paired with the key
+ provided (i.e. register a stream of values from this source matching K
+ with a collector).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="U extends org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write key-value pair at the head of this stream to the objects provided;
+ get next key-value pair from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new key from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="U extends org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new value from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request progress from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request position from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Forward close request to proxied RR.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, ?&gt;"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key at head of proxied RR
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Return true iff compareTo(other) returns 0.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy class for a RecordReader participating in the join framework.
+ This class keeps track of the &quot;head&quot; key-value pair for the
+ provided RecordReader and keeps a store of values matching a key when
+ this source is participating in a join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+</package>
+<package name="org.apache.hadoop.mapred.lib">
+ <!-- start class org.apache.hadoop.mapred.lib.ChainMapper -->
+ <class name="ChainMapper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper"/>
+ <constructor name="ChainMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="addMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="klass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&lt;K1, V1, K2, V2&gt;&gt;"/>
+ <param name="inputKeyClass" type="java.lang.Class&lt;? extends K1&gt;"/>
+ <param name="inputValueClass" type="java.lang.Class&lt;? extends V1&gt;"/>
+ <param name="outputKeyClass" type="java.lang.Class&lt;? extends K2&gt;"/>
+ <param name="outputValueClass" type="java.lang.Class&lt;? extends V2&gt;"/>
+ <param name="byValue" type="boolean"/>
+ <param name="mapperConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Adds a Mapper class to the chain job's JobConf.
+ <p/>
+ It must be specified how key and values are passed from one element of
+ the chain to the next: by value or by reference. If a Mapper relies on the
+ assumed semantics that the key and values are not modified by the collector,
+ 'by value' must be used. If the Mapper does not rely on these semantics,
+ 'by reference' can be used as an optimization that avoids serialization
+ and deserialization.
+ <p/>
+ For the added Mapper, the configuration given for it,
+ <code>mapperConf</code>, has precedence over the job's JobConf. This
+ precedence is in effect while the task is running.
+ <p/>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper; this is done by the addMapper call for the last mapper in the
+ chain.
+ <p/>
+
+ @param job job's JobConf to add the Mapper class.
+ @param klass the Mapper class to add.
+ @param inputKeyClass mapper input key class.
+ @param inputValueClass mapper input value class.
+ @param outputKeyClass mapper output key class.
+ @param outputValueClass mapper output value class.
+ @param byValue indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param mapperConf a JobConf with the configuration for the Mapper
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configures the ChainMapper and all the Mappers in the chain.
+ <p/>
+ If this method is overridden, <code>super.configure(...)</code> should be
+ invoked at the beginning of the overriding method.]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Chains the <code>map(...)</code> methods of the Mappers in the chain.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the ChainMapper and all the Mappers in the chain.
+ <p/>
+ If this method is overridden, <code>super.close()</code> should be
+ invoked at the end of the overriding method.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[The ChainMapper class allows multiple Mapper classes to be used within a
+ single Map task.
+ <p/>
+ The Mapper classes are invoked in a chained (or piped) fashion: the output of
+ the first becomes the input of the second, and so on until the last Mapper,
+ whose output is written to the task's output.
+ <p/>
+ The key functionality of this feature is that the Mappers in the chain do not
+ need to be aware that they are executed in a chain. This enables having
+ reusable specialized Mappers that can be combined to perform composite
+ operations within a single task.
+ <p/>
+ Special care has to be taken when creating chains to ensure that the
+ key/values output by a Mapper are valid for the following Mapper in the
+ chain. It is assumed that all Mappers and the Reducer in the chain use
+ matching output and input key and value classes, as no conversion is done
+ by the chaining code.
+ <p/>
+ Using the ChainMapper and the ChainReducer classes, it is possible to compose
+ Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
+ immediate benefit of this pattern is a dramatic reduction in disk IO.
+ <p/>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper; this is done by the addMapper call for the last mapper in the
+ chain.
+ <p/>
+ ChainMapper usage pattern:
+ <p/>
+ <pre>
+ ...
+ conf.setJobName("chain");
+ conf.setInputFormat(TextInputFormat.class);
+ conf.setOutputFormat(TextOutputFormat.class);
+ <p/>
+ JobConf mapAConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
+ Text.class, Text.class, true, mapAConf);
+ <p/>
+ JobConf mapBConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
+ LongWritable.class, Text.class, false, mapBConf);
+ <p/>
+ JobConf reduceConf = new JobConf(false);
+ ...
+ ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
+ Text.class, Text.class, true, reduceConf);
+ <p/>
+ ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
+ LongWritable.class, Text.class, false, null);
+ <p/>
+ ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
+ LongWritable.class, LongWritable.class, true, null);
+ <p/>
+ FileInputFormat.setInputPaths(conf, inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+ ...
+ <p/>
+ JobClient jc = new JobClient(conf);
+ RunningJob job = jc.submitJob(conf);
+ ...
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.ChainMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.ChainReducer -->
+ <class name="ChainReducer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer"/>
+ <constructor name="ChainReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="setReducer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="klass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Reducer&lt;K1, V1, K2, V2&gt;&gt;"/>
+ <param name="inputKeyClass" type="java.lang.Class&lt;? extends K1&gt;"/>
+ <param name="inputValueClass" type="java.lang.Class&lt;? extends V1&gt;"/>
+ <param name="outputKeyClass" type="java.lang.Class&lt;? extends K2&gt;"/>
+ <param name="outputValueClass" type="java.lang.Class&lt;? extends V2&gt;"/>
+ <param name="byValue" type="boolean"/>
+ <param name="reducerConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+      <![CDATA[Sets the Reducer class in the chain job's JobConf.
+ <p/>
+ It must be specified how key and values are passed from one element of
+ the chain to the next: by value or by reference. If a Reducer relies on the
+ assumed semantics that the key and values are not modified by the collector,
+ 'by value' must be used. If the Reducer does not rely on these semantics,
+ 'by reference' can be used as an optimization that avoids serialization
+ and deserialization.
+ <p/>
+ For the added Reducer, the configuration given for it,
+ <code>reducerConf</code>, has precedence over the job's JobConf. This
+ precedence is in effect while the task is running.
+ <p/>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainReducer; this is done by the setReducer or addMapper call for the last
+ element in the chain.
+
+ @param job job's JobConf to add the Reducer class.
+ @param klass the Reducer class to add.
+ @param inputKeyClass reducer input key class.
+ @param inputValueClass reducer input value class.
+ @param outputKeyClass reducer output key class.
+ @param outputValueClass reducer output value class.
+ @param byValue indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param reducerConf a JobConf with the configuration for the Reducer
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+ </doc>
+ </method>
+ <method name="addMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="klass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&lt;K1, V1, K2, V2&gt;&gt;"/>
+ <param name="inputKeyClass" type="java.lang.Class&lt;? extends K1&gt;"/>
+ <param name="inputValueClass" type="java.lang.Class&lt;? extends V1&gt;"/>
+ <param name="outputKeyClass" type="java.lang.Class&lt;? extends K2&gt;"/>
+ <param name="outputValueClass" type="java.lang.Class&lt;? extends V2&gt;"/>
+ <param name="byValue" type="boolean"/>
+ <param name="mapperConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Adds a Mapper class to the chain job's JobConf.
+ <p/>
+ It must be specified how key and values are passed from one element of
+ the chain to the next: by value or by reference. If a Mapper relies on the
+ assumed semantics that the key and values are not modified by the collector,
+ 'by value' must be used. If the Mapper does not rely on these semantics,
+ 'by reference' can be used as an optimization that avoids serialization
+ and deserialization.
+ <p/>
+ For the added Mapper, the configuration given for it,
+ <code>mapperConf</code>, has precedence over the job's JobConf. This
+ precedence is in effect while the task is running.
+ <p/>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper; this is done by the addMapper call for the last mapper in the
+ chain.
+
+ @param job chain job's JobConf to add the Mapper class.
+ @param klass the Mapper class to add.
+ @param inputKeyClass mapper input key class.
+ @param inputValueClass mapper input value class.
+ @param outputKeyClass mapper output key class.
+ @param outputValueClass mapper output value class.
+ @param byValue indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param mapperConf a JobConf with the configuration for the Mapper
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configures the ChainReducer, the Reducer and all the Mappers in the chain.
+ <p/>
+ If this method is overridden, <code>super.configure(...)</code> should be
+ invoked at the beginning of the overriding method.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="values" type="java.util.Iterator"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Chains the <code>reduce(...)</code> method of the Reducer with the
+ <code>map(...) </code> methods of the Mappers in the chain.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the ChainReducer, the Reducer and all the Mappers in the chain.
+ <p/>
+ If this method is overridden, <code>super.close()</code> should be
+ invoked at the end of the overriding method.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[The ChainReducer class allows multiple Mapper classes to be chained after a
+ Reducer within the Reducer task.
+ <p/>
+ For each record output by the Reducer, the Mapper classes are invoked in a
+ chained (or piped) fashion: the output of the first becomes the input of the
+ second, and so on until the last Mapper, whose output is written to the
+ task's output.
+ <p/>
+ The key functionality of this feature is that the Mappers in the chain do not
+ need to be aware that they are executed after the Reducer or in a chain.
+ This enables having reusable specialized Mappers that can be combined to
+ perform composite operations within a single task.
+ <p/>
+ Special care has to be taken when creating chains to ensure that the
+ key/values output by a Mapper are valid for the following Mapper in the
+ chain. It is assumed that all Mappers and the Reducer in the chain use
+ matching output and input key and value classes, as no conversion is done
+ by the chaining code.
+ <p/>
+ Using the ChainMapper and the ChainReducer classes, it is possible to compose
+ Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
+ immediate benefit of this pattern is a dramatic reduction in disk IO.
+ <p/>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainReducer; this is done by the setReducer or addMapper call for the last
+ element in the chain.
+ <p/>
+ ChainReducer usage pattern:
+ <p/>
+ <pre>
+ ...
+ conf.setJobName("chain");
+ conf.setInputFormat(TextInputFormat.class);
+ conf.setOutputFormat(TextOutputFormat.class);
+ <p/>
+ JobConf mapAConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
+ Text.class, Text.class, true, mapAConf);
+ <p/>
+ JobConf mapBConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
+ LongWritable.class, Text.class, false, mapBConf);
+ <p/>
+ JobConf reduceConf = new JobConf(false);
+ ...
+ ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
+ Text.class, Text.class, true, reduceConf);
+ <p/>
+ ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
+ LongWritable.class, Text.class, false, null);
+ <p/>
+ ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
+ LongWritable.class, LongWritable.class, true, null);
+ <p/>
+ FileInputFormat.setInputPaths(conf, inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+ ...
+ <p/>
+ JobClient jc = new JobClient(conf);
+ RunningJob job = jc.submitJob(conf);
+ ...
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.ChainReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.DelegatingInputFormat -->
+ <class name="DelegatingInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <constructor name="DelegatingInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[An {@link InputFormat} that delegates the handling of each path to one of
+ several other InputFormats.
+
+ @see MultipleInputs#addInputPath(JobConf, Path, Class, Class)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.DelegatingInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.DelegatingMapper -->
+ <class name="DelegatingMapper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="DelegatingMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1"/>
+ <param name="value" type="V1"/>
+ <param name="outputCollector" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+      <![CDATA[A {@link Mapper} that delegates the handling of each path to one of
+ several other Mappers.
+
+ @see MultipleInputs#addInputPath(JobConf, Path, Class, Class)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.DelegatingMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
+ <class name="FieldSelectionMapReduce" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="FieldSelectionMapReduce"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Maps the input key/value pair to an output pair built from the configured
+ key and value field selections.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements a mapper/reducer class that can be used to perform
+ field selections in a manner similar to Unix cut. The input data is treated
+ as fields separated by a user-specified separator (the default value is
+ "\t"). The user can specify a list of fields that form the map output keys,
+ and a list of fields that form the map output values. If the input format is
+ TextInputFormat, the mapper ignores the key passed to the map function, and
+ the fields are taken from the value only. Otherwise, the fields are the union
+ of those from the key and those from the value.
+
+ The field separator is configured by the attribute "mapred.data.field.separator".
+
+ The map output field list spec is under attribute "map.output.key.value.fields.spec".
+ The value is expected to be of the form "keyFieldsSpec:valueFieldsSpec", where
+ keyFieldsSpec and valueFieldsSpec are comma (,) separated lists of field specs:
+ fieldSpec,fieldSpec,fieldSpec ...
+ Each field spec can be a simple number (e.g. 5) specifying a specific field, or a range
+ (like 2-5) to specify a range of fields, or an open range (like 3-) specifying all
+ the fields starting from field 3. Open range field specs apply to value fields only;
+ they have no effect on the key fields.
+
+ Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies that fields 4,3,0 and 1 be used
+ for keys, and fields 6,5,1,2,3,7 and above for values.
+
+ The reduce output field list spec is under attribute "reduce.output.key.value.fields.spec".
+
+ The reducer extracts output key/value pairs in a similar manner, except that
+ the key is never ignored.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
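+  <!-- Usage sketch (not part of the generated API record): a hypothetical
+  job configuration for FieldSelectionMapReduce; the attribute names and
+  the example spec come from the javadoc above, the job name and reduce
+  spec are made up.
+
+  import org.apache.hadoop.mapred.JobConf;
+  import org.apache.hadoop.mapred.lib.FieldSelectionMapReduce;
+
+  public class FieldSelectionSketch {
+    public static void main(String[] args) {
+      JobConf conf = new JobConf();
+      conf.setJobName("field-selection");
+      conf.setMapperClass(FieldSelectionMapReduce.class);
+      conf.setReducerClass(FieldSelectionMapReduce.class);
+      // Input fields are separated by tabs (also the default).
+      conf.set("mapred.data.field.separator", "\t");
+      // Keys from fields 4,3,0,1; values from 6,5,1,2,3 and 7 onward.
+      conf.set("map.output.key.value.fields.spec", "4,3,0,1:6,5,1-3,7-");
+      // Reducer: key from field 0, values from field 1 onward.
+      conf.set("reduce.output.key.value.fields.spec", "0:1-");
+    }
+  }
+  -->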
+ <!-- start class org.apache.hadoop.mapred.lib.HashPartitioner -->
+ <class name="HashPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="HashPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partition keys by their {@link Object#hashCode()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.HashPartitioner -->
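+  <!-- Implementation note (a sketch, not the recorded source): the usual
+  hash partitioning rule masks the sign bit of hashCode() so the result is
+  non-negative, then buckets modulo the reduce count.
+
+  public class HashPartitionSketch {
+    public static int partition(Object key, int numReduceTasks) {
+      // Mask the sign bit, then take the bucket index.
+      return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
+    }
+  }
+  -->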
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <class name="IdentityMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="val" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[The identity function. Input key/value pair is written directly to
+ output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implements the identity function, mapping inputs directly to outputs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <class name="IdentityReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, V, K, V&gt;"/>
+ <constructor name="IdentityReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;V&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, V&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes all keys and values directly to output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Performs no reduction, writing all input values directly to the output.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler -->
+ <class name="InputSampler" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="InputSampler" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="writePartitionFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="sampler" type="org.apache.hadoop.mapred.lib.InputSampler.Sampler&lt;K, V&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a partition file for the given job, using the Sampler provided.
+ Queries the sampler for a sample keyset, sorts by the output key
+ comparator, selects the keys for each rank, and writes to the destination
+ returned from {@link
+ org.apache.hadoop.mapred.lib.TotalOrderPartitioner#getPartitionFile}.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Driver for InputSampler from the command line.
+ Configures a JobConf instance and calls {@link #writePartitionFile}.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Utility for collecting samples and writing a partition file for
+ {@link org.apache.hadoop.mapred.lib.TotalOrderPartitioner}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler -->
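+  <!-- Usage sketch (not part of the generated API record): a hypothetical
+  total-order sort setup; RandomSampler and writePartitionFile are the APIs
+  documented here, the sampling parameters are made up.
+
+  import org.apache.hadoop.io.Text;
+  import org.apache.hadoop.mapred.JobConf;
+  import org.apache.hadoop.mapred.lib.InputSampler;
+  import org.apache.hadoop.mapred.lib.TotalOrderPartitioner;
+
+  public class SamplerSketch {
+    public static void main(String[] args) throws Exception {
+      JobConf job = new JobConf();
+      job.setPartitionerClass(TotalOrderPartitioner.class);
+      // Choose each key with probability 0.1, keeping at most 10000
+      // samples drawn from no more than 10 splits.
+      InputSampler.Sampler<Text, Text> sampler =
+          new InputSampler.RandomSampler<Text, Text>(0.1, 10000, 10);
+      InputSampler.writePartitionFile(job, sampler);
+    }
+  }
+  -->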
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler -->
+ <class name="InputSampler.IntervalSampler" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.InputSampler.Sampler&lt;K, V&gt;"/>
+ <constructor name="InputSampler.IntervalSampler" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new IntervalSampler sampling <em>all</em> splits.
+ @param freq The frequency with which records will be emitted.]]>
+ </doc>
+ </constructor>
+ <constructor name="InputSampler.IntervalSampler" type="double, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new IntervalSampler.
+ @param freq The frequency with which records will be emitted.
+ @param maxSplitsSampled The maximum number of splits to examine.
+ @see #getSample]]>
+ </doc>
+ </constructor>
+ <method name="getSample" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For each split sampled, emit when the ratio of the number of records
+ retained to the total record count is less than the specified
+ frequency.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sample from s splits at regular intervals.
+ Useful for sorted data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler -->
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler.RandomSampler -->
+ <class name="InputSampler.RandomSampler" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.InputSampler.Sampler&lt;K, V&gt;"/>
+ <constructor name="InputSampler.RandomSampler" type="double, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new RandomSampler sampling <em>all</em> splits.
+ This will read every split at the client, which is very expensive.
+ @param freq Probability with which a key will be chosen.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.]]>
+ </doc>
+ </constructor>
+ <constructor name="InputSampler.RandomSampler" type="double, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new RandomSampler.
+ @param freq Probability with which a key will be chosen.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.
+ @param maxSplitsSampled The maximum number of splits to examine.]]>
+ </doc>
+ </constructor>
+ <method name="getSample" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Randomize the split order, then take the specified number of keys from
+ each split sampled, where each key is selected with the specified
+ probability and possibly replaced by a subsequently selected key when
+ the quota of keys from that split is satisfied.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sample from random points in the input.
+ General-purpose sampler. Takes numSamples / maxSplitsSampled inputs from
+ each split.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler.RandomSampler -->
+ <!-- start interface org.apache.hadoop.mapred.lib.InputSampler.Sampler -->
+ <interface name="InputSampler.Sampler" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getSample" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For a given job, collect and return a subset of the keys from the
+ input data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface to sample using an {@link org.apache.hadoop.mapred.InputFormat}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.InputSampler.Sampler -->
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler.SplitSampler -->
+ <class name="InputSampler.SplitSampler" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.InputSampler.Sampler&lt;K, V&gt;"/>
+ <constructor name="InputSampler.SplitSampler" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a SplitSampler sampling <em>all</em> splits.
+ Takes the first numSamples / numSplits records from each split.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.]]>
+ </doc>
+ </constructor>
+ <constructor name="InputSampler.SplitSampler" type="int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new SplitSampler.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.
+ @param maxSplitsSampled The maximum number of splits to examine.]]>
+ </doc>
+ </constructor>
+ <method name="getSample" return="K[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat&lt;K, V&gt;"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[From each split sampled, take the first numSamples / numSplits records.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Samples the first n records from s splits.
+ Inexpensive way to sample random data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler.SplitSampler -->
+ <!-- start class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <class name="InverseMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, V, V, K&gt;"/>
+ <constructor name="InverseMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;V, K&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The inverse function. Input keys and values are swapped.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that swaps keys and values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedComparator -->
+ <class name="KeyFieldBasedComparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="KeyFieldBasedComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[This comparator implementation provides a subset of the features provided
+ by Unix/GNU sort. In particular, the supported features are:
+ -n, (Sort numerically)
+ -r, (Reverse the result of comparison)
+ -k pos1[,pos2], where pos is of the form f[.c][opts], where f is the number
+ of the field to use, and c is the number of the first character from the
+ beginning of the field. Fields and character positions are numbered starting
+ with 1; a character position of zero in pos2 indicates the field's last
+ character. If '.c' is omitted from pos1, it defaults to 1 (the beginning
+ of the field); if omitted from pos2, it defaults to 0 (the end of the
+ field). opts are ordering options (any of 'nr' as described above).
+ We assume that the fields in the key are separated by
+ map.output.key.field.separator.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedComparator -->
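+  <!-- Usage sketch (not part of the generated API record): wiring the
+  comparator into a job; setKeyFieldComparatorOptions is assumed to be the
+  JobConf hook for the option string described above.
+
+  import org.apache.hadoop.mapred.JobConf;
+
+  public class KeyFieldSortSketch {
+    public static void main(String[] args) {
+      JobConf conf = new JobConf();
+      // Keys are tab-separated fields; sort on field 2, numerically,
+      // in reverse order (the 'n' and 'r' options described above).
+      conf.set("map.output.key.field.separator", "\t");
+      conf.setKeyFieldComparatorOptions("-k2,2nr");
+    }
+  }
+  -->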
+ <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+ <class name="KeyFieldBasedPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K2, V2&gt;"/>
+ <constructor name="KeyFieldBasedPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K2"/>
+ <param name="value" type="V2"/>
+ <param name="numReduceTasks" type="int"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <param name="currentHash" type="int"/>
+ </method>
+ <doc>
+      <![CDATA[Defines a way to partition keys based on certain key fields (also see
+ {@link KeyFieldBasedComparator}).
+ The key specification supported is of the form -k pos1[,pos2], where,
+ pos is of the form f[.c][opts], where f is the number
+ of the key field to use, and c is the number of the first character from
+ the beginning of the field. Fields and character positions are numbered
+ starting with 1; a character position of zero in pos2 indicates the
+ field's last character. If '.c' is omitted from pos1, it defaults to 1
+ (the beginning of the field); if omitted from pos2, it defaults to 0
+ (the end of the field).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
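+  <!-- Usage sketch (not part of the generated API record): the partitioner
+  counterpart of the comparator sketch above; setKeyFieldPartitionerOptions
+  is assumed to be the JobConf hook for the key spec described here.
+
+  import org.apache.hadoop.mapred.JobConf;
+  import org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner;
+
+  public class KeyFieldPartitionSketch {
+    public static void main(String[] args) {
+      JobConf conf = new JobConf();
+      conf.setPartitionerClass(KeyFieldBasedPartitioner.class);
+      // Route records to reducers by the first key field only.
+      conf.setKeyFieldPartitionerOptions("-k1,1");
+    }
+  }
+  -->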
+ <!-- start class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <class name="LongSumReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;K, org.apache.hadoop.io.LongWritable, K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="LongSumReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Reducer} that sums long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleInputs -->
+ <class name="MultipleInputs" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleInputs"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inputFormatClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <doc>
+ <![CDATA[Add a {@link Path} with a custom {@link InputFormat} to the list of
+ inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for the job
+ @param inputFormatClass {@link InputFormat} class to use for this path]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inputFormatClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.InputFormat&gt;"/>
+ <param name="mapperClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.Mapper&gt;"/>
+ <doc>
+ <![CDATA[Add a {@link Path} with a custom {@link InputFormat} and
+ {@link Mapper} to the list of inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for the job
+ @param inputFormatClass {@link InputFormat} class to use for this path
+ @param mapperClass {@link Mapper} class to use for this path]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class supports MapReduce jobs that have multiple input paths with
+ a different {@link InputFormat} and {@link Mapper} for each path.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleInputs -->
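+ <!-- Usage sketch for MultipleInputs, mirroring the addInputPath signatures
+      above. The paths and mapper classes (TextLogMapper, SeqLogMapper) are
+      assumed examples:
+
+   JobConf conf = new JobConf();
+   MultipleInputs.addInputPath(conf, new Path("/logs/text"),
+       TextInputFormat.class, TextLogMapper.class);
+   MultipleInputs.addInputPath(conf, new Path("/logs/seq"),
+       SequenceFileInputFormat.class, SeqLogMapper.class);
+ -->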
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+ <class name="MultipleOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat&lt;K, V&gt;"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a composite record writer that can write key/value data to different
+ output files.
+
+ @param fs
+ the file system to use
+ @param job
+ the job conf for the job
+ @param name
+ the leaf file name for the output file (such as "part-00000")
+ @param arg3
+ a progressable for reporting progress.
+ @return a composite record writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="generateLeafFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the leaf name for the output file name. The default behavior does
+ not change the leaf file name (such as part-00000).
+
+ @param name
+ the leaf file name for the output file
+ @return the given leaf file name]]>
+ </doc>
+ </method>
+ <method name="generateFileNameForKeyValue" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the output file name based on the given key and the leaf file
+ name. The default behavior is that the file name does not depend on the
+ key.
+
+ @param key
+ the key of the output data
+ @param name
+ the leaf file name
+ @return generated file name]]>
+ </doc>
+ </method>
+ <method name="generateActualKey" return="K"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <doc>
+ <![CDATA[Generate the actual key from the given key/value. The default behavior is that
+ the actual key is equal to the given key.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual key derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="generateActualValue" return="V"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="V"/>
+ <doc>
+ <![CDATA[Generate the actual value from the given key and value. The default behavior is that
+ the actual value is equal to the given value.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual value derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="getInputFileBasedOutputFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the output file name based on a given name and the input file name. If
+ the map input file does not exist (i.e. this is not a map-only job),
+ the given name is returned unchanged. If the config value for
+ "num.of.trailing.legs.to.use" is not set, or is set to 0 or a negative value, the given
+ name is returned unchanged. Otherwise, return a file name consisting of the
+ N trailing legs of the input file name, where N is the config value for
+ "num.of.trailing.legs.to.use".
+
+ @param job
+ the job config
+ @param name
+ the output file name
+ @return the output file name based on the given name and the input file name.]]>
+ </doc>
+ </method>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param fs
+ the file system to use
+ @param job
+ a job conf object
+ @param name
+ the name of the file over which a record writer object will be
+ constructed
+ @param arg3
+ a progressable object
+ @return A RecordWriter object over the given file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This abstract class extends the FileOutputFormat, allowing the output
+ data to be written to different output files. There are three basic use cases for
+ this class.
+
+ Case one: This class is used for a map-reduce job with at least one reducer.
+ The reducer wants to write data to different files depending on the actual
+ keys. It is assumed that a key (or value) encodes the actual key (value)
+ and the desired location for the actual key (value).
+
+ Case two: This class is used for a map-only job. The job wants to use an
+ output file name that is either a part of the input file name of the input
+ data, or some derivation of it.
+
+ Case three: This class is used for a map-only job. The job wants to use an
+ output file name that depends on both the keys and the input file name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
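+ <!-- Sketch of "case one" above: route records to per-key files by overriding
+      generateFileNameForKeyValue. The subclass name is illustrative; it extends
+      MultipleTextOutputFormat (defined later in this package), which supplies
+      getBaseRecordWriter:
+
+   public class KeyPartitionedTextOutput
+       extends MultipleTextOutputFormat<Text, Text> {
+     protected String generateFileNameForKeyValue(Text key, Text value,
+                                                  String name) {
+       // Prefix the leaf file name (e.g. part-00000) with the key.
+       return key.toString() + "/" + name;
+     }
+   }
+ -->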
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputs -->
+ <class name="MultipleOutputs" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleOutputs" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates and initializes multiple named outputs support; it should be
+ instantiated in the Mapper/Reducer configure method.
+
+ @param job the job configuration object]]>
+ </doc>
+ </constructor>
+ <method name="getNamedOutputsList" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Returns list of channel names.
+
+ @param conf job conf
+ @return list of channel names]]>
+ </doc>
+ </method>
+ <method name="isMultiNamedOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns if a named output is multiple.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return <code>true</code> if the named output is multi, <code>false</code>
+ if it is single. If the named output is not defined it returns
+ <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputFormatClass" return="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the named output OutputFormat.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return namedOutput OutputFormat]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputKeyClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the key class for a named output.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return class for the named output key]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputValueClass" return="java.lang.Class&lt;? extends org.apache.hadoop.io.Writable&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value class for a named output.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return class of named output value]]>
+ </doc>
+ </method>
+ <method name="addNamedOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="outputFormatClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"/>
+ <param name="keyClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="valueClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Adds a named output for the job.
+ <p/>
+
+ @param conf job conf to add the named output
+ @param namedOutput named output name; it must consist of letters
+ and numbers only, and cannot be the word 'part', as
+ that is reserved for the default output.
+ @param outputFormatClass OutputFormat class.
+ @param keyClass key class
+ @param valueClass value class]]>
+ </doc>
+ </method>
+ <method name="addMultiNamedOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="outputFormatClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.OutputFormat&gt;"/>
+ <param name="keyClass" type="java.lang.Class&lt;?&gt;"/>
+ <param name="valueClass" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Adds a multi named output for the job.
+ <p/>
+
+ @param conf job conf to add the named output
+ @param namedOutput named output name; it must consist of letters
+ and numbers only, and cannot be the word 'part', as
+ that is reserved for the default output.
+ @param outputFormatClass OutputFormat class.
+ @param keyClass key class
+ @param valueClass value class]]>
+ </doc>
+ </method>
+ <method name="setCountersEnabled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="enabled" type="boolean"/>
+ <doc>
+ <![CDATA[Enables or disables counters for the named outputs.
+ <p/>
+ By default these counters are disabled.
+ <p/>
+ The counters group is the {@link MultipleOutputs} class name.
+ <p/>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, an underscore '_' and the multiname.
+
+ @param conf job conf to enable or disable the counters in.
+ @param enabled indicates if the counters will be enabled or not.]]>
+ </doc>
+ </method>
+ <method name="getCountersEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Returns if the counters for the named outputs are enabled or not.
+ <p/>
+ By default these counters are disabled.
+ <p/>
+ The counters group is the {@link MultipleOutputs} class name.
+ <p/>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, an underscore '_' and the multiname.
+
+ @param conf job conf to check for the counters setting.
+ @return TRUE if the counters are enabled, FALSE if they are disabled.]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputs" return="java.util.Iterator&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an iterator over the defined named outputs.
+
+ @return iterator over the defined named outputs]]>
+ </doc>
+ </method>
+ <method name="getCollector" return="org.apache.hadoop.mapred.OutputCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the output collector for a named output.
+ <p/>
+
+ @param namedOutput the named output name
+ @param reporter the reporter
+ @return the output collector for the given named output
+ @throws IOException thrown if output collector could not be created]]>
+ </doc>
+ </method>
+ <method name="getCollector" return="org.apache.hadoop.mapred.OutputCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="multiName" type="java.lang.String"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the output collector for a multi named output.
+ <p/>
+
+ @param namedOutput the named output name
+ @param multiName the multi name part
+ @param reporter the reporter
+ @return the output collector for the given named output
+ @throws IOException thrown if output collector could not be created]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes all the opened named outputs.
+ <p/>
+ If overridden, subclasses must invoke <code>super.close()</code> at the
+ end of their <code>close()</code> implementation.
+
+ @throws java.io.IOException thrown if any of the MultipleOutput files
+ could not be closed properly.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MultipleOutputs class simplifies writing to additional outputs other
+ than the job default output via the <code>OutputCollector</code> passed to
+ the <code>map()</code> and <code>reduce()</code> methods of the
+ <code>Mapper</code> and <code>Reducer</code> implementations.
+ <p/>
+ Each additional output, or named output, may be configured with its own
+ <code>OutputFormat</code>, with its own key class and with its own value
+ class.
+ <p/>
+ A named output can be a single file or a multi file. The latter is referred to as
+ a multi named output.
+ <p/>
+ A multi named output is an unbounded set of files all sharing the same
+ <code>OutputFormat</code>, key class and value class configuration.
+ <p/>
+ When named outputs are used within a <code>Mapper</code> implementation,
+ key/values written to a named output are not part of the reduce phase; only
+ key/values written to the job <code>OutputCollector</code> are part of the
+ reduce phase.
+ <p/>
+ MultipleOutputs supports counters; by default they are disabled. The counters
+ group is the {@link MultipleOutputs} class name.
+ <p/>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, an underscore '_' and the multiname.
+ <p/>
+ Job configuration usage pattern is:
+ <pre>
+
+ JobConf conf = new JobConf();
+
+ FileInputFormat.setInputPaths(conf, inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+
+ conf.setMapperClass(MOMap.class);
+ conf.setReducerClass(MOReduce.class);
+ ...
+
+ // Defines additional single text based output 'text' for the job
+ MultipleOutputs.addNamedOutput(conf, "text", TextOutputFormat.class,
+ LongWritable.class, Text.class);
+
+ // Defines additional multi SequenceFile-based output 'seq' for the
+ // job
+ MultipleOutputs.addMultiNamedOutput(conf, "seq",
+ SequenceFileOutputFormat.class,
+ LongWritable.class, Text.class);
+ ...
+
+ JobClient jc = new JobClient();
+ RunningJob job = jc.submitJob(conf);
+
+ ...
+ </pre>
+ <p/>
+ Usage pattern in a Reducer implementation is:
+ <pre>
+
+ public class MOReduce implements
+ Reducer&lt;WritableComparable, Writable,
+ WritableComparable, Writable&gt; {
+ private MultipleOutputs mos;
+
+ public void configure(JobConf conf) {
+ ...
+ mos = new MultipleOutputs(conf);
+ }
+
+ public void reduce(WritableComparable key, Iterator&lt;Writable&gt; values,
+ OutputCollector output, Reporter reporter)
+ throws IOException {
+ ...
+ mos.getCollector("text", reporter).collect(key, new Text("Hello"));
+ mos.getCollector("seq", "A", reporter).collect(key, new Text("Bye"));
+ mos.getCollector("seq", "B", reporter).collect(key, new Text("Chau"));
+ ...
+ }
+
+ public void close() throws IOException {
+ mos.close();
+ ...
+ }
+
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputs -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <class name="MultipleSequenceFileOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleSequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends the MultipleOutputFormat, allowing the output data
+ to be written to different output files in SequenceFile output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <class name="MultipleTextOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat&lt;K, V&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleTextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends the MultipleOutputFormat, allowing the output
+ data to be written to different output files in Text output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
+ <class name="MultithreadedMapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable&lt;K1, V1, K2, V2&gt;"/>
+ <constructor name="MultithreadedMapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader&lt;K1, V1&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;K2, V2&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Multithreaded implementation for {@link org.apache.hadoop.mapred.MapRunnable}.
+ <p>
+ It can be used instead of the default implementation,
+ {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not CPU
+ bound, in order to improve throughput.
+ <p>
+ Map implementations using this MapRunnable must be thread-safe.
+ <p>
+ The Map-Reduce job has to be configured to use this MapRunnable class (using
+ the JobConf.setMapRunnerClass method) and
+ the number of threads the thread-pool can use with the
+ <code>mapred.map.multithreadedrunner.threads</code> property; its default
+ value is 10 threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
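+ <!-- Configuration sketch for MultithreadedMapRunner, per the class doc above.
+      The thread count of 32 is an arbitrary example:
+
+   JobConf conf = new JobConf();
+   // The Mapper implementation must be thread-safe when using this runner.
+   conf.setMapRunnerClass(MultithreadedMapRunner.class);
+   conf.setInt("mapred.map.multithreadedrunner.threads", 32);
+ -->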
+ <!-- start class org.apache.hadoop.mapred.lib.NLineInputFormat -->
+ <class name="NLineInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="NLineInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically splits the set of input files for the job; each split
+ contains N lines of the input.
+
+ @see org.apache.hadoop.mapred.FileInputFormat#getSplits(JobConf, int)]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[NLineInputFormat which splits N lines of input as one split.
+
+ In many "pleasantly" parallel applications, each process/mapper
+ processes the same input file(s), but the computations are
+ controlled by different parameters (referred to as "parameter sweeps").
+ One way to achieve this is to specify a set of parameters
+ (one set per line) as input in a control file
+ (which is the input path to the map-reduce application,
+ whereas the input dataset is specified
+ via a config variable in JobConf).
+
+ The NLineInputFormat can be used in such applications: it splits
+ the input file such that, by default, one line is fed as
+ a value to one map task, and the key is the offset;
+ i.e. (k,v) is (LongWritable, Text).
+ The location hints will span the whole mapred cluster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NLineInputFormat -->
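+ <!-- Parameter-sweep sketch for NLineInputFormat, per the class doc above. The
+      control-file path is an assumed example, and the
+      "mapred.line.input.format.linespermap" key is believed to set N in this
+      release:
+
+   JobConf conf = new JobConf();
+   conf.setInputFormat(NLineInputFormat.class);
+   // One parameter set (one line of the control file) per map task.
+   conf.setInt("mapred.line.input.format.linespermap", 1);
+   FileInputFormat.setInputPaths(conf, new Path("/sweeps/params.txt"));
+ -->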
+ <!-- start class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <class name="NullOutputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="NullOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[Consume all outputs and put them in /dev/null.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <class name="RegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="RegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+ <class name="TokenCountMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="TokenCountMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that maps text values into <token,freq> pairs. Uses
+ {@link StringTokenizer} to break text into tokens.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.TokenCountMapper -->
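+ <!-- Word-count sketch combining TokenCountMapper with the LongSumReducer
+      documented earlier in this package. The input/output paths are assumed
+      examples:
+
+   JobConf conf = new JobConf();
+   conf.setOutputKeyClass(Text.class);
+   conf.setOutputValueClass(LongWritable.class);
+   conf.setMapperClass(TokenCountMapper.class);
+   conf.setCombinerClass(LongSumReducer.class);
+   conf.setReducerClass(LongSumReducer.class);
+   FileInputFormat.setInputPaths(conf, new Path("/books"));
+   FileOutputFormat.setOutputPath(conf, new Path("/counts"));
+   JobClient.runJob(conf);
+ -->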
+ <!-- start class org.apache.hadoop.mapred.lib.TotalOrderPartitioner -->
+ <class name="TotalOrderPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner&lt;K, V&gt;"/>
+ <constructor name="TotalOrderPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Read in the partition file and build indexing data structures.
+ If the keytype is {@link org.apache.hadoop.io.BinaryComparable} and
+ <tt>total.order.partitioner.natural.order</tt> is not false, a trie
+ of the first <tt>total.order.partitioner.max.trie.depth</tt> (default 2) + 1 bytes
+ will be built. Otherwise, keys will be located using a binary search of
+ the partition keyset using the {@link org.apache.hadoop.io.RawComparator}
+ defined for this job. The input file must be sorted with the same
+ comparator and contain {@link
+ org.apache.hadoop.mapred.JobConf#getNumReduceTasks} - 1 keys.]]>
+ </doc>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V"/>
+ <param name="numPartitions" type="int"/>
+ </method>
+ <method name="setPartitionFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the path to the SequenceFile storing the sorted partition keyset.
+ It must be the case that for <tt>R</tt> reduces, there are <tt>R-1</tt>
+ keys in the SequenceFile.]]>
+ </doc>
+ </method>
+ <method name="getPartitionFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the path to the SequenceFile storing the sorted partition keyset.
+ @see #setPartitionFile(JobConf,Path)]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_PATH" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Partitioner effecting a total order by reading split points from
+ an externally generated source.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.TotalOrderPartitioner -->
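+ <!-- Usage sketch for TotalOrderPartitioner, per setPartitionFile above: for R
+      reduces the SequenceFile must hold R-1 sorted keys. The partition-file
+      path is an assumed example:
+
+   JobConf conf = new JobConf();
+   conf.setNumReduceTasks(4);
+   conf.setPartitionerClass(TotalOrderPartitioner.class);
+   // SequenceFile holding 3 keys, sorted with the job's comparator.
+   TotalOrderPartitioner.setPartitionFile(conf, new Path("/tmp/_partitions"));
+ -->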
+</package>
+<package name="org.apache.hadoop.mapred.lib.aggregate">
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+ <class name="DoubleValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="DoubleValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a double value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="double"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a double value.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getSum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up a sequence of double
+ values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <class name="LongValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the maximum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <class name="LongValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the minimum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <class name="LongValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getSum" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
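+ <!-- Usage sketch for LongValueSum, exercising the aggregator protocol above:
+
+   LongValueSum sum = new LongValueSum();
+   sum.addNextValue(40L);
+   sum.addNextValue("2");          // an object whose string form is a long
+   long total = sum.getSum();      // 42
+   String report = sum.getReport();
+   sum.reset();
+ -->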
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <class name="StringValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the largest of
+ a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <class name="StringValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the smallest of
+ a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+ <class name="UniqValueCount" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="UniqValueCount"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UniqValueCount" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.
+ @param maxNum the limit on the number of unique values to keep.]]>
+ </doc>
+ </constructor>
+ <method name="setMaxItems" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <doc>
+ <![CDATA[Set the limit on the number of unique values
+ @param n the desired limit on the number of unique values
+ @return the new limit on the number of unique values]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the number of unique objects aggregated]]>
+ </doc>
+ </method>
+ <method name="getUniqueItems" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the set of the unique objects]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of the unique objects. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that dedupes a sequence of objects.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+ <class name="UserDefinedValueAggregatorDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="UserDefinedValueAggregatorDescriptor" type="java.lang.String, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param className the class name of the user defined descriptor class
+ @param job a configuration object used for descriptor configuration]]>
+ </doc>
+ </constructor>
+ <method name="createInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="className" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create an instance of the given class
+ @param className the name of the class
+ @return a dynamically created instance of the given class]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pairs
+ by delegating the invocation to the real object.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate-based job.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a wrapper for a user defined value aggregator descriptor.
+ It serves two functions: one is to create an object of ValueAggregatorDescriptor from the
+ name of a user defined class that may be dynamically loaded. The other is to
+ delegate invocations of the generateKeyValPairs function to the created object.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
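+ <!-- Construction sketch for UserDefinedValueAggregatorDescriptor, per the
+      constructor above. "com.example.MyDescriptor" is a hypothetical user class
+      implementing ValueAggregatorDescriptor:
+
+   JobConf conf = new JobConf();
+   UserDefinedValueAggregatorDescriptor d =
+       new UserDefinedValueAggregatorDescriptor("com.example.MyDescriptor", conf);
+   // Delegates to the dynamically loaded descriptor instance.
+   ArrayList<Map.Entry<Text, Text>> pairs =
+       d.generateKeyValPairs(new Text("k"), new Text("v"));
+ -->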
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+ <interface name="ValueAggregator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val the value to be added]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of values as the outputs of the combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface defines the minimal protocol for value aggregators.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
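+ <!-- Sketch of a custom ValueAggregator implementing the minimal protocol above
+      (a simple value counter; entirely illustrative):
+
+   public class ValueCount implements ValueAggregator {
+     private long count = 0;
+     public void addNextValue(Object val) { count++; }
+     public void reset() { count = 0; }
+     public String getReport() { return Long.toString(count); }
+     public ArrayList getCombinerOutput() {
+       ArrayList out = new ArrayList();
+       out.add(Long.toString(count));
+       return out;
+     }
+   }
+ -->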
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+ <class name="ValueAggregatorBaseDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="ValueAggregatorBaseDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="generateEntry" return="java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <param name="id" type="java.lang.String"/>
+ <param name="val" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @param id the aggregation id
+ @param val the val associated with the id to be aggregated
+ @return an Entry whose key is the aggregation id prefixed with
+ the aggregation type.]]>
+ </doc>
+ </method>
+ <method name="generateValueAggregator" return="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @return a value aggregator of the given type.]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate 1 or 2 aggregation-id/value pairs for the given key/value pair.
+ The first id will be of type LONG_VALUE_SUM, with "record_count" as
+ its aggregation id. If the input is a file split,
+ the second id of the same type will be generated too, with the file name
+ as its aggregation id. This achieves the behavior of counting the total number
+ of records in the input data, and the number of records in each input file.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[get the input file name.
+
+ @param job a job configuration object]]>
+ </doc>
+ </method>
+ <field name="UNIQ_VALUE_COUNT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VALUE_HISTOGRAM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputFile" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements the common functionalities of
+ the subclasses of ValueAggregatorDescriptor class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <class name="ValueAggregatorCombiner" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorCombiner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[The combiner does not require any configuration.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Combines values for a given key.
+ @param key the key is expected to be a Text object, whose prefix indicates
+ the type of aggregation to aggregate the values.
+ @param values the values to combine
+ @param output to collect combined values]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic combiner of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+ <interface name="ValueAggregatorDescriptor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="generateKeyValPairs" return="java.util.ArrayList&lt;java.util.Map.Entry&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pair.
+ This function is usually called by the mapper of an Aggregate based job.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configure the object
+
+ @param job
+ a JobConf object that may contain the information that can be used
+ to configure the object.]]>
+ </doc>
+ </method>
+ <field name="TYPE_SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ONE" type="org.apache.hadoop.io.Text"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This interface defines the contract a value aggregator descriptor must
+ support. Such a descriptor can be configured with a JobConf object. Its main
+ function is to generate a list of aggregation-id/value pairs. An aggregation
+ id encodes an aggregation type which is used to guide the way to aggregate
+ the value in the reduce/combiner phase of an Aggregate based job. The mapper in
+ an Aggregate based map/reduce job may create one or more
+ ValueAggregatorDescriptor objects at configuration time. For each input
+ key/value pair, the mapper will use those objects to create aggregation
+ id/value pairs.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+ <class name="ValueAggregatorJob" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorJob"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation. Generic hadoop
+ arguments are accepted.
+ @return a JobConf object ready for submission.
+
+ @throws IOException
+ @see GenericOptionsParser]]>
+ </doc>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setAggregatorDescriptors"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[create and run an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the main class for creating a map/reduce job using the Aggregate
+ framework. Aggregate is a specialization of the map/reduce framework that
+ performs various simple aggregations.
+
+ Generally speaking, in order to implement an application using the Map/Reduce
+ model, the developer implements Map and Reduce functions (and possibly a
+ Combine function). However, many applications related to counting and
+ statistics computation have very similar characteristics. Aggregate abstracts
+ out the general patterns of these functions and implements those patterns.
+ In particular, the package provides generic mapper/reducer/combiner classes,
+ a set of built-in value aggregators, and a generic utility class that
+ helps users create map/reduce jobs using the generic classes. The built-in
+ aggregators include:
+
+ sum over numeric values; count of distinct values; histogram of values;
+ minimum, maximum, median, average, and standard deviation of numeric values.
+
+ The developer using Aggregate will need only to provide a plugin class
+ conforming to the following interface:
+
+ public interface ValueAggregatorDescriptor {
+   public ArrayList&lt;Entry&gt; generateKeyValPairs(Object key, Object value);
+   public void configure(JobConf job);
+ }
+
+ The package also provides a base class, ValueAggregatorBaseDescriptor,
+ implementing the above interface. The user can extend the base class and
+ implement generateKeyValPairs accordingly.
+
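+ For illustration, a minimal sketch of such a plugin that counts words
+ (the class name and the whitespace tokenization are hypothetical; generics
+ are omitted for brevity):
+
+ <pre>
+ public class WordCountDescriptor extends ValueAggregatorBaseDescriptor {
+   public ArrayList generateKeyValPairs(Object key, Object val) {
+     ArrayList retv = new ArrayList();
+     for (String word : val.toString().split(" ")) {
+       // LONG_VALUE_SUM asks the framework to sum the values emitted per word
+       retv.add(generateEntry(LONG_VALUE_SUM, word, ONE));
+     }
+     return retv;
+   }
+ }
+ </pre>
+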
+ The primary work of generateKeyValPairs is to emit one or more key/value
+ pairs based on the input key/value pair. The key in an output key/value pair
+ encodes two pieces of information: the aggregation type and the aggregation id.
+ The value will be aggregated onto the aggregation id according to the
+ aggregation type.
+
+ This class offers a function to generate a map/reduce job using the Aggregate
+ framework. The function takes the following parameters: the input directory
+ spec, the input format (text or sequence file), the output directory, and a
+ file specifying the user plugin class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <class name="ValueAggregatorJobBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K1, V1, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <implements name="org.apache.hadoop.mapred.Reducer&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <constructor name="ValueAggregatorJobBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="logSpec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="aggregatorDescriptorList" type="java.util.ArrayList&lt;org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor&gt;"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This abstract class implements some common functionality of
+ the generic mapper, reducer and combiner classes of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <class name="ValueAggregatorMapper" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[the map function. It iterates through the value aggregator descriptor
+ list to generate aggregation id/value pairs and emit them.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="org.apache.hadoop.io.Text"/>
+ <param name="arg1" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic mapper of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <class name="ValueAggregatorReducer" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase&lt;K1, V1&gt;"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator&lt;org.apache.hadoop.io.Text&gt;"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param key
+ the key is expected to be a Text object, whose prefix indicates
+ the type of aggregation to aggregate the values. In effect, data
+ driven computing is achieved. It is assumed that each aggregator's
+ getReport method emits appropriate output for the aggregator. This
+ may be further customized.
+ @param values the values to be aggregated]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.Text&gt;"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic reducer of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+ <class name="ValueHistogram" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="ValueHistogram"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add the given val to the aggregator.
+
+ @param val the value to be added. It is expected to be a string
+ in the form of xxxx\tnum, meaning xxxx has num occurrences.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this aggregator.
+ It includes the following basic statistics of the histogram:
+ the number of unique values
+ the minimum value
+ the median value
+ the maximum value
+ the average value
+ the standard deviation]]>
+ </doc>
+ </method>
+ <method name="getReportDetails" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a string representation of the list of value/frequency pairs of
+ the histogram]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a list of value/frequency pairs.
+ The return value is expected to be used by the reducer.]]>
+ </doc>
+ </method>
+ <method name="getReportItems" return="java.util.TreeMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a TreeMap representation of the histogram]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that computes the
+ histogram of a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+</package>
+<package name="org.apache.hadoop.mapred.lib.db">
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBConfiguration -->
+ <class name="DBConfiguration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="configureDB"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="driverClass" type="java.lang.String"/>
+ <param name="dbUrl" type="java.lang.String"/>
+ <param name="userName" type="java.lang.String"/>
+ <param name="passwd" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the DB access related fields in the JobConf.
+ @param job the job
+ @param driverClass JDBC Driver class name
+ @param dbUrl JDBC DB access URL.
+ @param userName DB access username
+ @param passwd DB access password]]>
+ </doc>
+ </method>
+ <method name="configureDB"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="driverClass" type="java.lang.String"/>
+ <param name="dbUrl" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the DB access related fields in the JobConf.
+ @param job the job
+ @param driverClass JDBC Driver class name
+ @param dbUrl JDBC DB access URL.]]>
+ </doc>
+ </method>
+ <field name="DRIVER_CLASS_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The JDBC Driver class name]]>
+ </doc>
+ </field>
+ <field name="URL_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[JDBC Database access URL]]>
+ </doc>
+ </field>
+ <field name="USERNAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[User name to access the database]]>
+ </doc>
+ </field>
+ <field name="PASSWORD_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Password to access the database]]>
+ </doc>
+ </field>
+ <field name="INPUT_TABLE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Input table name]]>
+ </doc>
+ </field>
+ <field name="INPUT_FIELD_NAMES_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Field names in the Input table]]>
+ </doc>
+ </field>
+ <field name="INPUT_CONDITIONS_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[WHERE clause in the input SELECT statement]]>
+ </doc>
+ </field>
+ <field name="INPUT_ORDER_BY_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[ORDER BY clause in the input SELECT statement]]>
+ </doc>
+ </field>
+ <field name="INPUT_QUERY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Whole input query, excluding LIMIT...OFFSET]]>
+ </doc>
+ </field>
+ <field name="INPUT_COUNT_QUERY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Input query to get the count of records]]>
+ </doc>
+ </field>
+ <field name="INPUT_CLASS_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Class name implementing DBWritable which will hold input tuples]]>
+ </doc>
+ </field>
+ <field name="OUTPUT_TABLE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Output table name]]>
+ </doc>
+ </field>
+ <field name="OUTPUT_FIELD_NAMES_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Field names in the Output table]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A container for configuration property names for jobs with DB input/output.
+ <br>
+ The job can be configured using the static methods in this class,
+ {@link DBInputFormat}, and {@link DBOutputFormat}.
+ <p>
+ Alternatively, the properties can be set in the configuration with proper
+ values.
+
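+ For example, a job might be configured as in the following sketch (the job
+ class, driver, URL, and credentials are hypothetical):
+
+ <pre>
+ JobConf job = new JobConf(MyJob.class);
+ DBConfiguration.configureDB(job,
+     "com.mysql.jdbc.Driver",            // JDBC driver class
+     "jdbc:mysql://localhost/mydb",      // database access URL
+     "user", "password");                // credentials
+ </pre>
+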
+ @see DBConfiguration#configureDB(JobConf, String, String, String, String)
+ @see DBInputFormat#setInput(JobConf, Class, String, String)
+ @see DBInputFormat#setInput(JobConf, Class, String, String, String, String...)
+ @see DBOutputFormat#setOutput(JobConf, String, String...)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBConfiguration -->
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat -->
+ <class name="DBInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat&lt;org.apache.hadoop.io.LongWritable, T&gt;"/>
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="DBInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="chunks" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getCountQuery" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the query for getting the total number of rows,
+ subclasses can override this for custom behaviour.]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.lib.db.DBWritable&gt;"/>
+ <param name="tableName" type="java.lang.String"/>
+ <param name="conditions" type="java.lang.String"/>
+ <param name="orderBy" type="java.lang.String"/>
+ <param name="fieldNames" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Initializes the map-part of the job with the appropriate input settings.
+
+ @param job The job
+ @param inputClass the class object implementing DBWritable, which is the
+ Java object holding tuple fields.
+ @param tableName The table to read data from
+ @param conditions The conditions with which to select data, e.g. '(updated >
+ 20070101 AND length > 0)'
+ @param orderBy the fieldNames in the orderBy clause.
+ @param fieldNames The field names in the table
+ @see #setInput(JobConf, Class, String, String)]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputClass" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.lib.db.DBWritable&gt;"/>
+ <param name="inputQuery" type="java.lang.String"/>
+ <param name="inputCountQuery" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Initializes the map-part of the job with the appropriate input settings.
+
+ @param job The job
+ @param inputClass the class object implementing DBWritable, which is the
+ Java object holding tuple fields.
+ @param inputQuery the input query to select fields. Example :
+ "SELECT f1, f2, f3 FROM Mytable ORDER BY f1"
+ @param inputCountQuery the input query that returns the number of records in
+ the table.
+ Example : "SELECT COUNT(f1) FROM Mytable"
+ @see #setInput(JobConf, Class, String, String, String, String...)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat that reads input data from an SQL table.
+ <p>
+ DBInputFormat emits LongWritables containing the record number as
+ key and DBWritables as value.
+
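+ For example, the table-based setInput variant might be used as in the
+ following sketch (the job class, the DBWritable implementation, and the
+ table/column names are hypothetical):
+
+ <pre>
+ JobConf job = new JobConf(MyJob.class);
+ job.setInputFormat(DBInputFormat.class);
+ DBInputFormat.setInput(job, MyRecord.class,
+     "employees",                         // table to read from
+     "salary > 0",                        // conditions (WHERE clause)
+     "name",                              // orderBy field
+     new String[] { "name", "salary" });  // field names
+ </pre>
+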
+ The SQL query and the input class can be set using one of the two
+ setInput methods.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBInputSplit -->
+ <class name="DBInputFormat.DBInputSplit" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="DBInputFormat.DBInputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DBInputFormat.DBInputSplit" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convenience Constructor
+ @param start the index of the first row to select
+ @param end the index of the last row to select]]>
+ </doc>
+ </constructor>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getStart" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The index of the first row to select]]>
+ </doc>
+ </method>
+ <method name="getEnd" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The index of the last row to select]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return The total row count in this split]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="output" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputSplit that spans a set of rows.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBInputSplit -->
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBRecordReader -->
+ <class name="DBInputFormat.DBRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader&lt;org.apache.hadoop.io.LongWritable, T&gt;"/>
+ <constructor name="DBInputFormat.DBRecordReader" type="org.apache.hadoop.mapred.lib.db.DBInputFormat.DBInputSplit, java.lang.Class&lt;T&gt;, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ <doc>
+ <![CDATA[@param split The InputSplit to read data for
+ @throws SQLException]]>
+ </doc>
+ </constructor>
+ <method name="getSelectQuery" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the query for selecting the records,
+ subclasses can override this for custom behaviour.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.LongWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createValue" return="T extends org.apache.hadoop.mapred.lib.db.DBWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.LongWritable"/>
+ <param name="value" type="T extends org.apache.hadoop.mapred.lib.db.DBWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A RecordReader that reads records from an SQL table.
+ Emits LongWritables containing the record number as
+ key and DBWritables as value.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat.NullDBWritable -->
+ <class name="DBInputFormat.NullDBWritable" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.db.DBWritable"/>
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="DBInputFormat.NullDBWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="java.sql.ResultSet"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="java.sql.PreparedStatement"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ </method>
+ <doc>
+ <![CDATA[A class that does nothing, implementing DBWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat.NullDBWritable -->
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBOutputFormat -->
+ <class name="DBOutputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat&lt;K, V&gt;"/>
+ <constructor name="DBOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="constructQuery" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="table" type="java.lang.String"/>
+ <param name="fieldNames" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Constructs the query used as the prepared statement to insert data.
+
+ @param table
+ the table to insert into
+ @param fieldNames
+ the fields to insert into. If field names are unknown, supply an
+ array of nulls.]]>
+ </doc>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filesystem" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filesystem" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="tableName" type="java.lang.String"/>
+ <param name="fieldNames" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Initializes the reduce-part of the job with the appropriate output settings
+
+ @param job
+ The job
+ @param tableName
+ The table to insert data into
+ @param fieldNames
+ The field names in the table. If unknown, supply the appropriate
+ number of nulls.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An OutputFormat that sends the reduce output to an SQL table.
+ <p>
+ {@link DBOutputFormat} accepts &lt;key,value&gt; pairs, where
+ key has a type extending DBWritable. The returned {@link RecordWriter}
+ writes <b>only the key</b> to the database with a batch SQL query.]]>
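+ <![CDATA[
+ <p>
+ For example, the output side of a job might be configured as in the
+ following sketch (the job class and the table/column names are hypothetical):
+ <pre>
+ JobConf job = new JobConf(MyJob.class);
+ job.setOutputFormat(DBOutputFormat.class);
+ DBOutputFormat.setOutput(job, "word_counts",  // table to insert into
+     new String[] { "word", "count" });        // column names
+ </pre>]]>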
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBOutputFormat.DBRecordWriter -->
+ <class name="DBOutputFormat.DBRecordWriter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordWriter&lt;K, V&gt;"/>
+ <constructor name="DBOutputFormat.DBRecordWriter" type="java.sql.Connection, java.sql.PreparedStatement"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.mapred.lib.db.DBWritable"/>
+ <param name="value" type="V"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A RecordWriter that writes the reduce output to an SQL table.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBOutputFormat.DBRecordWriter -->
+ <!-- start interface org.apache.hadoop.mapred.lib.db.DBWritable -->
+ <interface name="DBWritable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="statement" type="java.sql.PreparedStatement"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ <doc>
+ <![CDATA[Sets the fields of the object in the {@link PreparedStatement}.
+ @param statement the statement that the fields are put into.
+ @throws SQLException]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="resultSet" type="java.sql.ResultSet"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ <doc>
+ <![CDATA[Reads the fields of the object from the {@link ResultSet}.
+ @param resultSet the {@link ResultSet} to get the fields from.
+ @throws SQLException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Objects that are read from/written to a database should implement
+ <code>DBWritable</code>. DBWritable is similar to {@link Writable},
+ except that the {@link #write(PreparedStatement)} method takes a
+ {@link PreparedStatement}, and {@link #readFields(ResultSet)}
+ takes a {@link ResultSet}.
+ <p>
+ Implementations are responsible for writing the fields of the object
+ to PreparedStatement, and reading the fields of the object from the
+ ResultSet.
+
+ <p>Example:</p>
+ If we have the following table in the database:
+ <pre>
+ CREATE TABLE MyTable (
+ counter INTEGER NOT NULL,
+ timestamp BIGINT NOT NULL
+ );
+ </pre>
+ then we can read/write the tuples from/to the table with:
+ <pre>
+ public class MyWritable implements Writable, DBWritable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ //Writable#write() implementation
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ //Writable#readFields() implementation
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public void write(PreparedStatement statement) throws SQLException {
+ statement.setInt(1, counter);
+ statement.setLong(2, timestamp);
+ }
+
+ public void readFields(ResultSet resultSet) throws SQLException {
+ counter = resultSet.getInt(1);
+ timestamp = resultSet.getLong(2);
+ }
+ }
+ </pre>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.db.DBWritable -->
+</package>
+<package name="org.apache.hadoop.mapred.pipes">
+ <!-- start class org.apache.hadoop.mapred.pipes.Submitter -->
+ <class name="Submitter" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="Submitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Submitter" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExecutable" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the URI of the application's executable.
+ @param conf
+ @return the URI where the application's executable is located]]>
+ </doc>
+ </method>
+ <method name="setExecutable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="executable" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the URI for the application's executable. Normally this is an hdfs:
+ location.
+ @param conf
+ @param executable The URI of the application's executable.]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job is using a Java RecordReader.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordReader" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java RecordReader
+ @param conf the configuration to check
+ @return is it a Java RecordReader?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Mapper is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaMapper" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Mapper.
+ @param conf the configuration to check
+ @return is it a Java Mapper?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaReducer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Reducer is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaReducer" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Reducer.
+ @param conf the configuration to check
+ @return is it a Java Reducer?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job will use a Java RecordWriter.
+ @param conf the configuration to modify
+ @param value the new value to set]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordWriter" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Will the reduce use a Java RecordWriter?
+ @param conf the configuration to check
+ @return true, if the output of the job will be written by Java]]>
+ </doc>
+ </method>
+ <method name="getKeepCommandFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Does the user want to keep the command file for debugging? If this is
+ true, pipes will write a copy of the command data to a file in the
+ task directory named "downlink.data", which may be used to run the C++
+ program under the debugger. You probably also want to set
+ JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
+ being deleted.
+ To run using the data file, set the environment variable
+ "hadoop.pipes.command.file" to point to the file.
+ @param conf the configuration to check
+ @return will the framework save the command file?]]>
+ </doc>
+ </method>
+ <method name="setKeepCommandFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether to keep the command file for debugging
+ @param conf the configuration to modify
+ @param keep the new value]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link Submitter#runJob(JobConf)}">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException
+ @deprecated Use {@link Submitter#runJob(JobConf)}]]>
+ </doc>
+ </method>
+ <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="jobSubmit" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the Map-Reduce framework.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running job.
+
+ @param conf the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Submit a pipes job based on the command line arguments.
+ @param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The main entry point and job submitter. It may be used either as a
+ command-line tool or programmatically, via its API, to launch Pipes jobs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.pipes.Submitter -->
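+ <!-- A minimal sketch of API-based submission, assuming the pipes executable
+      and job I/O have been configured elsewhere:
+
+      JobConf conf = new JobConf();
+      // ... configure the pipes executable and input/output paths here ...
+      RunningJob job = Submitter.runJob(conf); // applies the pipes-specific modifications
+
+      The same class also backs the command line, e.g. "bin/hadoop pipes ...".
+ -->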
+</package>
+<package name="org.apache.hadoop.metrics">
+ <!-- start class org.apache.hadoop.metrics.ContextFactory -->
+ <class name="ContextFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ContextFactory"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of ContextFactory]]>
+ </doc>
+ </constructor>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the named attribute, or null if there is no
+ attribute of that name.
+
+ @param attributeName the attribute name
+ @return the attribute value]]>
+ </doc>
+ </method>
+ <method name="getAttributeNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all the factory's attributes.
+
+ @return the attribute names]]>
+ </doc>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Sets the named factory attribute to the specified value, creating it
+ if it did not already exist. If the value is null, this is the same as
+ calling removeAttribute.
+
+ @param attributeName the attribute name
+ @param value the new attribute value]]>
+ </doc>
+ </method>
+ <method name="removeAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes the named attribute if it exists.
+
+ @param attributeName the attribute name]]>
+ </doc>
+ </method>
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <exception name="InstantiationException" type="java.lang.InstantiationException"/>
+ <exception name="IllegalAccessException" type="java.lang.IllegalAccessException"/>
+ <doc>
+ <![CDATA[Returns the named MetricsContext instance, constructing it if necessary
+ using the factory's current configuration attributes. <p/>
+
+ When constructing the instance, if the factory property
+ <code><i>contextName</i>.class</code> exists,
+ its value is taken to be the name of the class to instantiate. Otherwise,
+ the default is to create an instance of
+ <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a
+ dummy "no-op" context which will cause all metric data to be discarded.
+
+ @param contextName the name of the context
+ @return the named MetricsContext]]>
+ </doc>
+ </method>
+ <method name="getNullContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a "null" context - one which does nothing.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the singleton ContextFactory instance, constructing it if
+ necessary. <p/>
+
+ When the instance is constructed, this method checks if the file
+ <code>hadoop-metrics.properties</code> exists on the class path. If it
+ exists, it must be in the format defined by java.util.Properties, and all
+ the properties in the file are set as attributes on the newly created
+ ContextFactory instance.
+
+ @return the singleton ContextFactory instance]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factory class for creating MetricsContext objects. To obtain an instance
+ of this class, use the static <code>getFactory()</code> method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ContextFactory -->
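+ <!-- A minimal sketch of obtaining a context through the factory, as described
+      above; the context name "myContext" is hypothetical:
+
+      ContextFactory factory = ContextFactory.getFactory(); // may read hadoop-metrics.properties
+      MetricsContext context = factory.getContext("myContext");
+      // getFactory() and getContext() declare IOException and reflection
+      // exceptions, so callers must catch or rethrow them.
+ -->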
+ <!-- start interface org.apache.hadoop.metrics.MetricsContext -->
+ <interface name="MetricsContext" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.
+
+ @return the context name]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, that is, the emitting of metrics records
+ as they are updated.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free any data that the implementation
+ may have buffered for sending at the next timer event. It
+ is OK to call <code>startMonitoring()</code> again after calling
+ this.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and also frees any buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new MetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at regular time intervals, as
+ determined by the implementation-class specific configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records and then return]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_PERIOD" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default period in seconds at which data is sent to the metrics system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The main interface to the metrics package.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsContext -->
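+ <!-- A minimal sketch of driving a MetricsContext, assuming a context and an
+      Updater obtained elsewhere; the record name "diskStats" is hypothetical:
+
+      context.registerUpdater(myUpdater);  // called once per timer period
+      context.startMonitoring();           // begin periodic emission; may throw IOException
+      MetricsRecord rec = context.createRecord("diskStats");
+      // ... set tags and metrics on rec, then rec.update() ...
+ -->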
+ <!-- start class org.apache.hadoop.metrics.MetricsException -->
+ <class name="MetricsException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException
+
+ @param message an error message]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[General-purpose, unchecked metrics exception.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsException -->
+ <!-- start interface org.apache.hadoop.metrics.MetricsRecord -->
+ <interface name="MetricsRecord" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value. The tagValue may be null,
+ which is treated the same as an empty String.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.
+
+ @param tagName name of a tag]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes, from the buffered data table, all rows having tags
+ that equal the tags that have been set on this record. For example,
+ if there are no tags on this record, all rows for this record name
+ would be removed. Or, if there is a single tag on this record, then
+ just rows containing a tag with the same name and value would be removed.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A named and optionally tagged set of records to be sent to the metrics
+ system. <p/>
+
+ A record name identifies the kind of data to be reported. For example, a
+ program reporting statistics relating to the disks on a computer might use
+ a record name "diskStats".<p/>
+
+ A record has zero or more <i>tags</i>. A tag has a name and a value. To
+ continue the example, the "diskStats" record might use a tag named
+ "diskName" to identify a particular disk. Sometimes it is useful to have
+ more than one tag, so there might also be a "diskType" tag with value "ide",
+ "scsi", and so on.<p/>
+
+ A record also has zero or more <i>metrics</i>. These are the named
+ values that are to be reported to the metrics system. In the "diskStats"
+ example, possible metric names would be "diskPercentFull", "diskPercentBusy",
+ "kbReadPerSecond", etc.<p/>
+
+ The general procedure for using a MetricsRecord is to fill in its tag and
+ metric values, and then call <code>update()</code> to pass the record to the
+ client library.
+ Metric data is not immediately sent to the metrics system
+ each time that <code>update()</code> is called.
+ An internal table is maintained, identified by the record name. This
+ table has columns
+ corresponding to the tag and the metric names, and rows
+ corresponding to each unique set of tag values. An update
+ either modifies an existing row in the table, or adds a new row with a set of
+ tag values that are different from all the other rows. Note that if there
+ are no tags, then there can be at most one row in the table. <p/>
+
+ Once a row is added to the table, its data will be sent to the metrics system
+ on every timer period, whether or not it has been updated since the previous
+ timer period. If this is inappropriate, for example if metrics were being
+ reported by some transient object in an application, the <code>remove()</code>
+ method can be used to remove the row and thus stop the data from being
+ sent.<p/>
+
+ Note that the <code>update()</code> method is atomic. This means that it is
+ safe for different threads to be updating the same metric. More precisely,
+ it is OK for different threads to call <code>update()</code> on MetricsRecord instances
+ with the same set of tag names and tag values. Different threads should
+ <b>not</b> use the same MetricsRecord instance at the same time.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsRecord -->
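+ <!-- A minimal sketch of the procedure described above, reusing the
+      hypothetical "diskStats" example and assuming a record created from a
+      MetricsContext:
+
+      MetricsRecord rec = context.createRecord("diskStats");
+      rec.setTag("diskName", "sda");         // selects the row in the internal table
+      rec.setMetric("diskPercentFull", 83);  // named value to report
+      rec.update();                          // buffer the row for the next timer period
+      // rec.remove();                       // later: stop sending this row
+ -->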
+ <!-- start class org.apache.hadoop.metrics.MetricsUtil -->
+ <class name="MetricsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to return the named context.
+ If the desired context cannot be created for any reason, the exception
+ is logged, and a null context is returned.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to create and return a new metrics record instance within the
+ given context. This record is tagged with the host name.
+
+ @param context the context
+ @param recordName name of the record
+ @return newly created metrics record]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility class to simplify creation and reporting of hadoop metrics.
+
+ For examples of usage, see NameNodeMetrics.
+ @see org.apache.hadoop.metrics.MetricsRecord
+ @see org.apache.hadoop.metrics.MetricsContext
+ @see org.apache.hadoop.metrics.ContextFactory]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsUtil -->
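+ <!-- A minimal sketch using the two utility methods above; the names "dfs"
+      and "diskStats" are hypothetical:
+
+      MetricsContext context = MetricsUtil.getContext("dfs"); // falls back to a "null" context on error
+      MetricsRecord rec = MetricsUtil.createRecord(context, "diskStats"); // tagged with the host name
+ -->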
+ <!-- start interface org.apache.hadoop.metrics.Updater -->
+ <interface name="Updater" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Timer-based call-back from the metrics library.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Call-back interface. See <code>MetricsContext.registerUpdater()</code>.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.Updater -->
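+ <!-- A minimal sketch of an Updater implementation, assuming a record held by
+      the enclosing class; readDiskUsage() is hypothetical:
+
+      class DiskUpdater implements Updater {
+        public void doUpdates(MetricsContext context) {
+          rec.setMetric("diskPercentFull", readDiskUsage());
+          rec.update();
+        }
+      }
+      // registration: context.registerUpdater(new DiskUpdater());
+ -->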
+</package>
+<package name="org.apache.hadoop.metrics.file">
+ <!-- start class org.apache.hadoop.metrics.file.FileContext -->
+ <class name="FileContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of FileContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="getFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the configured file name, or null.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring by opening, in append mode, the
+ file specified by the <code>fileName</code> attribute, if one is
+ specified. Otherwise the data is written to standard output.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring, closing the file.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Emits a metrics record to a file.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Flushes the output writer, forcing updates to disk.]]>
+ </doc>
+ </method>
+ <field name="FILE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="PERIOD_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Metrics context for writing metrics to a file.<p/>
+
+ This class is configured by setting ContextFactory attributes which in turn
+ are usually configured through a properties file. All the attributes are
+ prefixed by the contextName. For example, the properties file might contain:
+ <pre>
+ myContextName.fileName=/tmp/metrics.log
+ myContextName.period=5
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.file.FileContext -->
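+ <!-- An equivalent programmatic sketch of the properties shown above, setting
+      the same attributes directly on the factory; "myContextName" is
+      hypothetical and the class attribute selects FileContext:
+
+      ContextFactory factory = ContextFactory.getFactory();
+      factory.setAttribute("myContextName.class",
+          "org.apache.hadoop.metrics.file.FileContext");
+      factory.setAttribute("myContextName.fileName", "/tmp/metrics.log");
+      factory.setAttribute("myContextName.period", "5");
+      MetricsContext context = factory.getContext("myContextName");
+ -->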
+</package>
+<package name="org.apache.hadoop.metrics.ganglia">
+ <!-- start class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+ <class name="GangliaContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GangliaContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of GangliaContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Context for sending metrics to Ganglia.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+</package>
+<package name="org.apache.hadoop.metrics.jvm">
+ <!-- start class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <class name="EventCounter" extends="org.apache.log4j.AppenderSkeleton"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="EventCounter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getFatal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getError" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWarn" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfo" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="requiresLayout" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A log4j Appender that simply counts logging events at four levels:
+ fatal, error, warn and info.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <!-- start class org.apache.hadoop.metrics.jvm.JvmMetrics -->
+ <class name="JvmMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="init" return="org.apache.hadoop.metrics.jvm.JvmMetrics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="processName" type="java.lang.String"/>
+ <param name="sessionId" type="java.lang.String"/>
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[This will be called periodically (with the period being configuration
+ dependent).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Singleton class which reports Java Virtual Machine metrics to the metrics API.
+ Any application can create an instance of this class in order to emit
+ Java VM metrics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.JvmMetrics -->
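+ <!-- A minimal sketch of emitting JVM metrics, as described above; the
+      process name and session id shown are hypothetical:
+
+      JvmMetrics.init("MyDaemon", "session-1"); // registers itself for periodic doUpdates calls
+ -->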
+</package>
+<package name="org.apache.hadoop.metrics.spi">
+ <!-- start class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
+ <class name="AbstractMetricsContext" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsContext"/>
+ <constructor name="AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of AbstractMetricsContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ <doc>
+ <![CDATA[Initializes the context.]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for subclasses to access factory attributes.]]>
+ </doc>
+ </method>
+ <method name="getAttributeTable" return="java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="tableName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an attribute-value map derived from the factory attributes
+ by finding all factory attributes that begin with
+ <i>contextName</i>.<i>tableName</i>. The returned map consists of
+ those attributes with the contextName and tableName stripped off.]]>
+ </doc>
+ </method>
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.]]>
+ </doc>
+ </method>
+ <method name="getContextFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the factory by which this context was created.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, that is, the emitting of metrics records.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free buffered data.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and frees buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new MetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="newRecord" return="org.apache.hadoop.metrics.spi.MetricsRecordImpl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Subclasses should override this if they subclass MetricsRecordImpl.
+ @param recordName the name of the record
+ @return newly created instance of MetricsRecordImpl or subclass]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at time intervals determined by
+ the configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sends a record to the metrics system.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called each period after all records have been emitted, this method does nothing.
+ Subclasses may override it in order to perform some kind of flush.]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.update(). Creates or updates a row in
+ the internal table of metric data.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.remove(). Removes all matching rows in
+ the internal table of metric data. A row matches if it has the same
+ tag names and values as record, but it may also have additional
+ tags.]]>
+ </doc>
+ </method>
+ <method name="getPeriod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the timer period.]]>
+ </doc>
+ </method>
+ <method name="setPeriod"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="period" type="int"/>
+ <doc>
+ <![CDATA[Sets the timer period.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The main class of the Service Provider Interface. This class should be
+ extended in order to integrate the Metrics API with a specific metrics
+ client library. <p/>
+
+ This class implements the internal table of metric data, and the timer
+ on which data is to be sent to the metrics system. Subclasses must
+ override the abstract <code>emitRecord</code> method in order to transmit
+ the data. <p/>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
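+ <!-- A minimal sketch of an SPI integration, assuming the inherited timer and
+      internal table are sufficient; only the abstract emitRecord is overridden,
+      and the public no-arg constructor allows instantiation by class name:
+
+      public class StdoutContext extends AbstractMetricsContext {
+        public StdoutContext() {
+          super();
+        }
+        protected void emitRecord(String contextName, String recordName,
+                                  OutputRecord outRec) throws IOException {
+          // transmit the row; a real subclass would format tags and metrics
+          System.out.println(contextName + "." + recordName + ": "
+              + outRec.getMetricNames());
+        }
+      }
+ -->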
+ <!-- start class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
+ <class name="MetricsRecordImpl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsRecord"/>
+ <constructor name="MetricsRecordImpl" type="java.lang.String, org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsRecordImpl]]>
+ </doc>
+ </constructor>
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes the row, if one exists, from the buffered data table whose tags
+ equal the tags that have been set on this record.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of MetricsRecord. Keeps a back-pointer to the context
+ from which it was created, and delegates back to it on <code>update()</code>
+ and <code>remove()</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
+ <!-- start class org.apache.hadoop.metrics.spi.MetricValue -->
+ <class name="MetricValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricValue" type="java.lang.Number, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricValue]]>
+ </doc>
+ </constructor>
+ <method name="isIncrement" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumber" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="ABSOLUTE" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCREMENT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Number that is either an absolute or an incremental amount.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricValue -->
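+ <!-- A minimal sketch of the two kinds of values, using the public flag
+      fields declared above:
+
+      MetricValue gauge = new MetricValue(42, MetricValue.ABSOLUTE);  // replaces the value
+      MetricValue delta = new MetricValue(1, MetricValue.INCREMENT);  // added to the value
+ -->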
+ <!-- start class org.apache.hadoop.metrics.spi.NullContext -->
+ <class name="NullContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContext]]>
+ </doc>
+ </constructor>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do-nothing version of startMonitoring]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Null metrics context: a metrics context which does nothing. Used as the
+ default context, so that no performance data is emitted if no configuration
+ data is found.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContext -->
+ <!-- start class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <class name="NullContextWithUpdateThread" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContextWithUpdateThread"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContextWithUpdateThread]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A null context which has a thread calling
+ periodically when monitoring is started, so that the data stays
+ correctly sampled.
+ In all other respects this is like the null context: no data is emitted.
+ This is suitable for monitoring systems such as JMX, which read the
+ metrics only when a client requests the data.
+
+ The default implementations of start and stop monitoring
+ in AbstractMetricsContext are good enough.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
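+ <!-- Editor's note: the null contexts above are selected through the metrics
+      configuration file. A minimal hadoop-metrics.properties sketch (the
+      "dfs" record group and the period value are illustrative assumptions,
+      not taken from this API description):
+
+        dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+        dfs.period=10
+ -->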
+ <!-- start class org.apache.hadoop.metrics.spi.OutputRecord -->
+ <class name="OutputRecord" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTagNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of tag names]]>
+ </doc>
+ </method>
+ <method name="getTag" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a tag object which can be a String, Integer, Short or Byte.
+
+ @return the tag value, or null if there is no such tag]]>
+ </doc>
+ </method>
+ <method name="getMetricNames" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of metric names.]]>
+ </doc>
+ </method>
+ <method name="getMetric" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the metric object which can be a Float, Integer, Short or Byte.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents a record of metric data to be sent to a metrics system.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.OutputRecord -->
+ <!-- start class org.apache.hadoop.metrics.spi.Util -->
+ <class name="Util" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="parse" return="java.util.List&lt;java.net.InetSocketAddress&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="specs" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Parses a space and/or comma separated sequence of server specifications
+ of the form <i>hostname</i> or <i>hostname:port</i>. If
+ the specs string is null, defaults to localhost:defaultPort.
+
+ @return a list of InetSocketAddress objects.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Static utility methods]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.Util -->
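+ <!-- Editor's note: a minimal sketch of Util.parse as documented above
+      (host names and ports are illustrative placeholders):
+
+        import java.net.InetSocketAddress;
+        import java.util.List;
+        import org.apache.hadoop.metrics.spi.Util;
+
+        public class ParseDemo {
+          public static void main(String[] args) {
+            // Comma/space separated specs; "host2" carries no port, so the
+            // default port (9000 here) is used for it.
+            List<InetSocketAddress> addrs = Util.parse("host1:8020, host2", 9000);
+            for (InetSocketAddress a : addrs) {
+              System.out.println(a);
+            }
+          }
+        }
+ -->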
+</package>
+<package name="org.apache.hadoop.metrics.util">
+ <!-- start class org.apache.hadoop.metrics.util.MBeanUtil -->
+ <class name="MBeanUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MBeanUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="registerMBean" return="javax.management.ObjectName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="serviceName" type="java.lang.String"/>
+ <param name="nameName" type="java.lang.String"/>
+ <param name="theMbean" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Register the MBean using our standard MBeanName format
+ "hadoop.dfs:service=<serviceName>,name=<nameName>",
+ where <serviceName> and <nameName> are the supplied parameters.
+
+ @param serviceName
+ @param nameName
+ @param theMbean - the MBean to register
+ @return the name used to register the MBean]]>
+ </doc>
+ </method>
+ <method name="unregisterMBean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mbeanName" type="javax.management.ObjectName"/>
+ </method>
+ <doc>
+ <![CDATA[This util class provides a method to register an MBean using
+ our standard naming convention as described in the doc
+ for {@link #registerMBean(String, String, Object)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MBeanUtil -->
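+ <!-- Editor's note: a hypothetical registration sketch for MBeanUtil. The
+      DemoMBean/Demo pair follows the standard JMX naming rule for MBeans;
+      the service and name strings are placeholders:
+
+        import javax.management.ObjectName;
+        import org.apache.hadoop.metrics.util.MBeanUtil;
+
+        public class MBeanDemo {
+          public interface DemoMBean { int getValue(); }
+          public static class Demo implements DemoMBean {
+            public int getValue() { return 42; }
+          }
+
+          public static void main(String[] args) {
+            // Registered as "hadoop.dfs:service=Demo,name=DemoInfo" per the
+            // naming format documented above.
+            ObjectName n = MBeanUtil.registerMBean("Demo", "DemoInfo", new Demo());
+            System.out.println(n);
+            MBeanUtil.unregisterMBean(n);
+          }
+        }
+ -->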
+ <!-- start class org.apache.hadoop.metrics.util.MetricsIntValue -->
+ <class name="MetricsIntValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsIntValue" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="int"/>
+ <doc>
+ <![CDATA[Set the value
+ @param newValue]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+ <![CDATA[Increment metrics by the incr value
+ @param incr - value to be added]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decr" type="int"/>
+ <doc>
+ <![CDATA[Decrement metrics by the decr value
+ @param decr - value to subtract]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Dec metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the mr.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsIntValue class is for a metric that does not vary
+ with time, but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsIntValue -->
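+ <!-- Editor's note: a usage sketch for MetricsIntValue. It assumes the
+      MetricsUtil helpers from org.apache.hadoop.metrics for obtaining a
+      context and record; the context and record names are illustrative:
+
+        import org.apache.hadoop.metrics.MetricsContext;
+        import org.apache.hadoop.metrics.MetricsRecord;
+        import org.apache.hadoop.metrics.MetricsUtil;
+        import org.apache.hadoop.metrics.util.MetricsIntValue;
+
+        public class IntValueDemo {
+          public static void main(String[] args) {
+            MetricsIntValue capacity = new MetricsIntValue("capacity");
+            MetricsContext ctx = MetricsUtil.getContext("dfs");
+            MetricsRecord rec = MetricsUtil.createRecord(ctx, "demo");
+            capacity.set(100);
+            capacity.pushMetric(rec); // published: value changed since last push
+            capacity.pushMetric(rec); // no-op: unchanged since last push
+            rec.update();
+          }
+        }
+ -->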
+ <!-- start class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <class name="MetricsLongValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsLongValue" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="long"/>
+ <doc>
+ <![CDATA[Set the value
+ @param newValue]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Increment metrics by the incr value
+ @param incr - value to be added]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decr" type="long"/>
+ <doc>
+ <![CDATA[Decrement metrics by the decr value
+ @param decr - value to subtract]]>
+ </doc>
+ </method>
+ <method name="dec"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Dec metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the mr.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsLongValue class is for a metric that does not vary
+ with time, but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
+ <class name="MetricsTimeVaryingInt" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingInt" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+ <![CDATA[Increment metrics by the incr value
+ @param incr - number of operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Inc metrics by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #previousIntervalValue}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value at the previous interval
+ @return prev interval value]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingInt class is for a metric that naturally
+ varies over time (e.g. number of files created).
+ The metric is published at each interval heartbeat (the interval
+ is set in the metrics config file).
+ Note: if one wants a time associated with the metric then use
+ @see org.apache.hadoop.metrics.util.MetricsTimeVaryingRate]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
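+ <!-- Editor's note: a fragment sketching the delta semantics of
+      MetricsTimeVaryingInt.pushMetric, reusing the MetricsRecord rec from
+      the MetricsIntValue sketch above:
+
+        MetricsTimeVaryingInt filesCreated = new MetricsTimeVaryingInt("files_created");
+        filesCreated.inc();             // +1 in the current interval
+        filesCreated.inc(4);            // +4 more
+        filesCreated.pushMetric(rec);   // pushes the delta (5) for this interval
+        int prev = filesCreated.getPreviousIntervalValue(); // 5
+ -->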
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
+ <class name="MetricsTimeVaryingRate" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingRate" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param n the name of the metrics to be used to publish the metric]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numOps" type="int"/>
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for numOps operations
+ @param numOps - number of operations
+ @param time - time for numOps operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metrics for one operation
+ @param time for one operation]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and
+ {@link #getPreviousIntervalNumOps()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalNumOps" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of operations in the previous interval
+ @return - ops in prev interval]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalAverageTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The average time taken by an operation in the previous interval
+ @return - the average time]]>
+ </doc>
+ </method>
+ <method name="getMinTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The min time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return min time for an operation]]>
+ </doc>
+ </method>
+ <method name="getMaxTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The max time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return max time for an operation]]>
+ </doc>
+ </method>
+ <method name="resetMinMax"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the min max values]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingRate class is for a rate-based metric that
+ naturally varies over time (e.g. time taken to create a file).
+ The rate is averaged at each interval heartbeat (the interval
+ is set in the metrics config file).
+ This class also keeps track of the min and max rates along with
+ a method to reset the min-max.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
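+ <!-- Editor's note: a fragment sketching how an operation might be timed
+      with MetricsTimeVaryingRate (again reusing a MetricsRecord rec as in
+      the sketches above):
+
+        MetricsTimeVaryingRate createTime = new MetricsTimeVaryingRate("create");
+        long start = System.currentTimeMillis();
+        // ... perform one create operation ...
+        createTime.inc(System.currentTimeMillis() - start);
+        createTime.pushMetric(rec);
+        long avgMs = createTime.getPreviousIntervalAverageTime();
+        int ops = createTime.getPreviousIntervalNumOps();
+ -->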
+</package>
+<package name="org.apache.hadoop.net">
+ <!-- start class org.apache.hadoop.net.CachedDNSToSwitchMapping -->
+ <class name="CachedDNSToSwitchMapping" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.DNSToSwitchMapping"/>
+ <constructor name="CachedDNSToSwitchMapping" type="org.apache.hadoop.net.DNSToSwitchMapping"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ </method>
+ <field name="rawMapping" type="org.apache.hadoop.net.DNSToSwitchMapping"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A cached implementation of DNSToSwitchMapping that takes a
+ raw DNSToSwitchMapping and stores the resolved network locations in
+ a cache. Subsequent calls for an already-resolved network location
+ are served from the cache.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.CachedDNSToSwitchMapping -->
+ <!-- start class org.apache.hadoop.net.DNS -->
+ <class name="DNS" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DNS"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reverseDns" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hostIp" type="java.net.InetAddress"/>
+ <param name="ns" type="java.lang.String"/>
+ <exception name="NamingException" type="javax.naming.NamingException"/>
+ <doc>
+ <![CDATA[Returns the hostname associated with the specified IP address by the
+ provided nameserver.
+
+ @param hostIp
+ The address to reverse lookup
+ @param ns
+ The host name of a reachable DNS server
+ @return The host name associated with the provided IP
+ @throws NamingException
+ If a NamingException is encountered]]>
+ </doc>
+ </method>
+ <method name="getIPs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the IPs associated with the provided interface, if any, in
+ textual form.
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return A string vector of all the IPs associated with the provided
+ interface
+ @throws UnknownHostException
+ If an UnknownHostException is encountered in querying the
+ default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultIP" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the first available IP address associated with the provided
+ network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The IP address in text form
+ @throws UnknownHostException
+ If one is encountered in querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the provided nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return A string vector of all host names associated with the IPs tied to
+ the specified interface
+ @throws UnknownHostException]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the default nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The list of host names associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the provided
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the default
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides direct and reverse lookup functionalities, allowing
+ the querying of specific network interfaces or nameservers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.DNS -->
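+ <!-- Editor's note: a sketch of the static DNS helpers above. The interface
+      name, IP address, and nameserver host are placeholders:
+
+        import java.net.InetAddress;
+        import org.apache.hadoop.net.DNS;
+
+        public class DnsDemo {
+          public static void main(String[] args) throws Exception {
+            String host = DNS.getDefaultHost("eth0");
+            String[] ips = DNS.getIPs("eth0");
+            String name = DNS.reverseDns(InetAddress.getByName("10.0.0.1"),
+                                         "ns1.example.com");
+            System.out.println(host + " " + ips.length + " " + name);
+          }
+        }
+ -->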
+ <!-- start interface org.apache.hadoop.net.DNSToSwitchMapping -->
+ <interface name="DNSToSwitchMapping" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="resolve" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List&lt;java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[Resolves a list of DNS-names/IP-addresses and returns a list of
+ switch information (network paths). One-to-one correspondence must be
+ maintained between the elements in the lists.
+ Consider an element in the argument list - x.y.com. The switch information
+ that is returned must be a network path of the form /foo/rack,
+ where / is the root, and 'foo' is the switch where 'rack' is connected.
+ Note the hostname/ip-address is not part of the returned path.
+ The network topology of the cluster would determine the number of
+ components in the network path.
+ @param names
+ @return list of resolved network paths]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An interface that should be implemented to allow pluggable
+ DNS-name/IP-address to RackID resolvers.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.DNSToSwitchMapping -->
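+ <!-- Editor's note: a toy DNSToSwitchMapping that satisfies the contract
+      above by mapping every host to one fixed rack, preserving the required
+      one-to-one correspondence between names and paths (the rack path is an
+      assumption):
+
+        import java.util.ArrayList;
+        import java.util.List;
+        import org.apache.hadoop.net.DNSToSwitchMapping;
+
+        public class SingleRackMapping implements DNSToSwitchMapping {
+          public List<String> resolve(List<String> names) {
+            List<String> paths = new ArrayList<String>(names.size());
+            for (int i = 0; i < names.size(); i++) {
+              paths.add("/default-rack");
+            }
+            return paths;
+          }
+        }
+ -->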
+ <!-- start class org.apache.hadoop.net.NetUtils -->
+ <class name="NetUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="clazz" type="java.lang.Class&lt;?&gt;"/>
+ <doc>
+ <![CDATA[Get the socket factory for the given class according to its
+ configuration parameter
+ <tt>hadoop.rpc.socket.factory.class.&lt;ClassName&gt;</tt>. When no
+ such parameter exists then fall back on the default socket factory as
+ configured by <tt>hadoop.rpc.socket.factory.class.default</tt>. If
+ this default socket factory is not configured, then fall back on the JVM
+ default socket factory.
+
+ @param conf the configuration
+ @param clazz the class (usually a {@link VersionedProtocol})
+ @return a socket factory]]>
+ </doc>
+ </method>
+ <method name="getDefaultSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default socket factory as specified by the configuration
+ parameter <tt>hadoop.rpc.socket.factory.class.default</tt>
+
+ @param conf the configuration
+ @return the default socket factory as specified in the configuration or
+ the JVM default socket factory if the configuration does not
+ contain a default socket factory property.]]>
+ </doc>
+ </method>
+ <method name="getSocketFactoryFromProperty" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="propValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the socket factory corresponding to the given proxy URI. If the
+ given proxy URI corresponds to an absent configuration parameter,
+ returns null. If the URI is malformed, an exception is raised.
+
+ @param propValue the property which is the class name of the
+ SocketFactory to instantiate; assumed non null and non empty.
+ @return a socket factory as defined in the property value.]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="getServerAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="oldBindAddressName" type="java.lang.String"/>
+ <param name="oldPortName" type="java.lang.String"/>
+ <param name="newBindAddressName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Handle the transition from pairs of attributes specifying a host and port
+ to a single colon separated one.
+ @param conf the configuration to check
+ @param oldBindAddressName the old address attribute name
+ @param oldPortName the old port attribute name
+ @param newBindAddressName the new combined name
+ @return the complete address from the configuration]]>
+ </doc>
+ </method>
+ <method name="addStaticResolution"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="resolvedName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a static resolution for host. This can be used for setting up
+ fake hostnames that point to a well-known host. For example,
+ some test cases need daemons with different hostnames
+ running on the same machine. In order to create connections to these
+ daemons, one can set up mappings from those hostnames to "localhost".
+ {@link NetUtils#getStaticResolution(String)} can be used to query for
+ the actual hostname.
+ @param host
+ @param resolvedName]]>
+ </doc>
+ </method>
+ <method name="getStaticResolution" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Retrieves the resolved name for the passed host. The resolved name must
+ have been set earlier using
+ {@link NetUtils#addStaticResolution(String, String)}
+ @param host
+ @return the resolution]]>
+ </doc>
+ </method>
+ <method name="getAllStaticResolutions" return="java.util.List&lt;java.lang.String[]&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is used to get all the resolutions that were added using
+ {@link NetUtils#addStaticResolution(String, String)}. The return
+ value is a List, each element of which contains an array of String
+ of the form String[0]=hostname, String[1]=resolved-hostname
+ @return the list of resolutions]]>
+ </doc>
+ </method>
+ <method name="getConnectAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="server" type="org.apache.hadoop.ipc.Server"/>
+ <doc>
+ <![CDATA[Returns InetSocketAddress that a client can use to
+ connect to the server. Server.getListenerAddress() is not correct when
+ the server binds to "0.0.0.0". This returns "127.0.0.1:port" when
+ getListenerAddress() returns "0.0.0.0:port".
+
+ @param server
+ @return socket address that a client can use to connect to the server.]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getInputStream(socket, socket.getSoTimeout()).<br><br>
+
+ From documentation for {@link #getInputStream(Socket, long)}:<br>
+ Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see #getInputStream(Socket, long)
+
+ @param socket
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. Zero
+ means wait as long as necessary.
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getOutputStream(socket, 0). Timeout of zero implies write will
+ wait until data is available.<br><br>
+
+ From documentation for {@link #getOutputStream(Socket, long)} : <br>
+ Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ data is available.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getOutputStream()}.
+
+ @see #getOutputStream(Socket, long)
+
+ @param socket
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ data is available.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getOutputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. Zero
+ means wait as long as necessary.
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="normalizeHostName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a string representation of a host, return its IP address
+ in textual form.
+
+ @param name a string representation of a host:
+ either a textual representation of its IP address or its host name
+ @return its IP address in the string format]]>
+ </doc>
+ </method>
+ <method name="normalizeHostNames" return="java.util.List&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.Collection&lt;java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[Given a collection of string representation of hosts, return a list of
+ corresponding IP addresses in the textual representation.
+
+ @param names a collection of string representations of hosts
+ @return a list of corresponding IP addresses in the string format
+ @see #normalizeHostName(String)]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetUtils -->
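+ <!-- Editor's note: a sketch of the NetUtils address helpers above (host
+      names and port are illustrative):
+
+        import java.net.InetSocketAddress;
+        import org.apache.hadoop.net.NetUtils;
+
+        public class NetUtilsDemo {
+          public static void main(String[] args) {
+            // Both documented spec forms: "host:port" and host plus default port.
+            InetSocketAddress a = NetUtils.createSocketAddr("nn.example.com:8020");
+            InetSocketAddress b = NetUtils.createSocketAddr("nn.example.com", 8020);
+
+            // Test-style alias: the fake name resolves to localhost.
+            NetUtils.addStaticResolution("fake-host", "localhost");
+            System.out.println(NetUtils.getStaticResolution("fake-host"));
+            System.out.println(a.equals(b));
+          }
+        }
+ -->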
+ <!-- start class org.apache.hadoop.net.NetworkTopology -->
+ <class name="NetworkTopology" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetworkTopology"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Add a leaf node.
+ Update node counter & rack counter if necessary.
+ @param node
+ node to be added
+ @exception IllegalArgumentException if a node is added under a leaf,
+ or the node to be added is not a leaf]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Remove a node.
+ Update node counter & rack counter if necessary.
+ @param node
+ node to be removed]]>
+ </doc>
+ </method>
+ <method name="contains" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if the tree contains node <i>node</i>
+
+ @param node
+ a node
+ @return true if <i>node</i> is already in the tree; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="loc" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a string representation of a node, return its reference
+
+ @param loc
+ a path-like string representation of a node
+ @return a reference to the node; null if the node is not in the tree]]>
+ </doc>
+ </method>
+ <method name="getNumOfRacks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of racks]]>
+ </doc>
+ </method>
+ <method name="getNumOfLeaves" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of nodes]]>
+ </doc>
+ </method>
+ <method name="getDistance" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return the distance between two nodes.
+ It is assumed that the distance from one node to its parent is 1.
+ The distance between two nodes is calculated by summing up their distances
+ to their closest common ancestor.
+ @param node1 one node
+ @param node2 another node
+ @return the distance between node1 and node2
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="isOnSameRack" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if two nodes are on the same rack
+ @param node1 one node
+ @param node2 another node
+ @return true if node1 and node2 are on the same rack; false otherwise
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="chooseRandom" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Randomly choose one node from <i>scope</i>.
+ If scope starts with ~, choose one from all nodes except for the
+ ones in <i>scope</i>; otherwise, choose one from <i>scope</i>.
+ @param scope range of nodes from which a node will be chosen
+ @return the chosen node]]>
+ </doc>
+ </method>
+ <method name="countNumOfAvailableNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <param name="excludedNodes" type="java.util.List&lt;org.apache.hadoop.net.Node&gt;"/>
+ <doc>
+ <![CDATA[Return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i>.
+ If scope starts with ~, return the number of nodes that are in neither
+ <i>scope</i> nor <i>excludedNodes</i>.
+ @param scope a path string that may start with ~
+ @param excludedNodes a list of nodes
+ @return number of available nodes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert a network tree to a string]]>
+ </doc>
+ </method>
+ <method name="pseudoSortByDistance"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reader" type="org.apache.hadoop.net.Node"/>
+ <param name="nodes" type="org.apache.hadoop.net.Node[]"/>
+ <doc>
+ <![CDATA[Sort the nodes array by their distances to <i>reader</i>.
+ It linearly scans the array; if a local node is found, it is swapped with
+ the first element of the array.
+ If a local rack node is found, it is swapped with the first element
+ following the local node.
+ If neither a local node nor a local rack node is found, a random replica
+ location is put at position 0.
+ The rest of the nodes are left untouched.]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_RACK" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_HOST_LEVEL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The class represents a cluster of computers with a tree hierarchical
+ network topology.
+ For example, a cluster may consist of many data centers filled
+ with racks of computers.
+ In a network topology, leaves represent data nodes (computers) and inner
+ nodes represent switches/routers that manage traffic in/out of data centers
+ or racks.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetworkTopology -->
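+ <!-- Editor's note: a distance sketch for NetworkTopology under the rule
+      documented above (node-to-parent distance is 1, summed to the closest
+      common ancestor); host names and rack locations are illustrative:
+
+        import org.apache.hadoop.net.NetworkTopology;
+        import org.apache.hadoop.net.Node;
+        import org.apache.hadoop.net.NodeBase;
+
+        public class TopologyDemo {
+          public static void main(String[] args) {
+            NetworkTopology cluster = new NetworkTopology();
+            Node a = new NodeBase("h1:50010", "/dc1/rack1");
+            Node b = new NodeBase("h2:50010", "/dc1/rack1");
+            Node c = new NodeBase("h3:50010", "/dc1/rack2");
+            cluster.add(a);
+            cluster.add(b);
+            cluster.add(c);
+            System.out.println(cluster.getDistance(a, b));  // same rack: 1+1 = 2
+            System.out.println(cluster.getDistance(a, c));  // same DC:   2+2 = 4
+            System.out.println(cluster.isOnSameRack(a, b)); // true
+          }
+        }
+ -->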
+ <!-- start interface org.apache.hadoop.net.Node -->
+ <interface name="Node" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the string representation of this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the node's network location]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface defines a node in a network topology.
+ A node may be a leaf representing a data node or an inner
+ node representing a datacenter or rack.
+ Each node has a name, and its location in the network is
+ determined by a string with syntax similar to a file name.
+ For example, a data node's name is hostname:port# and if it's located at
+ rack "orange" in datacenter "dog", the string representation of its
+ network location is /dog/orange]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.Node -->
+ <!-- start class org.apache.hadoop.net.NodeBase -->
+ <class name="NodeBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <constructor name="NodeBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its path
+ @param path
+ a concatenation of this node's location, the path separator, and its name]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String, org.apache.hadoop.net.Node, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location
+ @param parent this node's parent node
+ @param level this node's level in the tree]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set this node's network location]]>
+ </doc>
+ </method>
+ <method name="getPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return this node's path]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's string representation]]>
+ </doc>
+ </method>
+ <method name="normalize" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Normalize a path]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree]]>
+ </doc>
+ </method>
+ <field name="PATH_SEPARATOR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PATH_SEPARATOR_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ROOT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="location" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="level" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="parent" type="org.apache.hadoop.net.Node"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class that implements interface Node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NodeBase -->
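+ <!-- Editor's note: per the constructor doc above, a NodeBase path is the
+      node's location, the path separator, and its name concatenated
+      (values illustrative; imports as in the topology sketch above):
+
+        Node n = new NodeBase("h1:50010", "/dc1/rack1");
+        System.out.println(NodeBase.getPath(n)); // "/dc1/rack1/h1:50010"
+ -->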
+ <!-- start class org.apache.hadoop.net.ScriptBasedMapping -->
+ <class name="ScriptBasedMapping" extends="org.apache.hadoop.net.CachedDNSToSwitchMapping"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ScriptBasedMapping"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ScriptBasedMapping" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[This class implements the {@link DNSToSwitchMapping} interface using a
+ script configured via topology.script.file.name .]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.ScriptBasedMapping -->
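+ <!-- Editor's note: a hypothetical wiring of ScriptBasedMapping. The script
+      path is a placeholder; the script is expected to translate host
+      names/IPs into network paths per the DNSToSwitchMapping contract above:
+
+        import java.util.Arrays;
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.net.ScriptBasedMapping;
+
+        public class ScriptMappingDemo {
+          public static void main(String[] args) {
+            Configuration conf = new Configuration();
+            conf.set("topology.script.file.name", "/etc/hadoop/topology.sh");
+            ScriptBasedMapping mapping = new ScriptBasedMapping(conf);
+            System.out.println(mapping.resolve(Arrays.asList("h1.example.com")));
+          }
+        }
+ -->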
+ <!-- start class org.apache.hadoop.net.SocketInputStream -->
+ <class name="SocketInputStream" extends="java.io.InputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.ReadableByteChannel"/>
+ <constructor name="SocketInputStream" type="java.nio.channels.ReadableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for reading, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), timeout):<br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds; must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), socket.getSoTimeout()):<br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.ReadableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this input stream.
+ This is useful, for example, as the channel for
+ {@link FileChannel#transferFrom(ReadableByteChannel, long, long)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForReadable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for reading.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an input stream that can have a timeout while reading.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} for the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketOutputStream} for writing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketInputStream -->
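+ <!-- Editor's note: a minimal read sketch for SocketInputStream, not part of the
+      generated API record. The host, port, and 5000 ms timeout are illustrative;
+      the socket must have an associated channel, e.g. one obtained via
+      SocketChannel.open().
+
+      import java.net.InetSocketAddress;
+      import java.nio.channels.SocketChannel;
+      import org.apache.hadoop.net.SocketInputStream;
+
+      SocketChannel ch = SocketChannel.open(new InetSocketAddress("host", 9000));
+      SocketInputStream in = new SocketInputStream(ch.socket(), 5000);
+      byte[] buf = new byte[4096];
+      int n = in.read(buf, 0, buf.length); // throws SocketTimeoutException after 5 s
+      in.close();
+ -->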
+ <!-- start class org.apache.hadoop.net.SocketOutputStream -->
+ <class name="SocketOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.WritableByteChannel"/>
+ <constructor name="SocketOutputStream" type="java.nio.channels.WritableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for writing, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds; must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketOutputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketOutputStream(socket.getChannel(), timeout):<br><br>
+
+ Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketOutputStream#SocketOutputStream(WritableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds; must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.WritableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this stream.
+ This is useful, for example, as the channel for
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for writing.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="transferToFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileCh" type="java.nio.channels.FileChannel"/>
+ <param name="position" type="long"/>
+ <param name="count" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Transfers data from FileChannel using
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.
+
+ Similar to readFully(), this waits until the requested amount of
+ data is transferred.
+
+ @param fileCh FileChannel to transfer data from.
+ @param position position within the channel where the transfer begins
+ @param count number of bytes to transfer.
+
+ @throws EOFException
+ If the end of the input file is reached before the requested
+ number of bytes is transferred.
+
+ @throws SocketTimeoutException
+ If this channel blocks transfer longer than timeout for
+ this stream.
+
+ @throws IOException Includes any exception thrown by
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an output stream that can have a timeout while writing.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} on the associated socket will throw
+ IllegalBlockingModeException.
+ Please use {@link SocketInputStream} for reading.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketOutputStream -->
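+ <!-- Editor's note: a minimal sketch of transferToFully, not part of the generated
+      API record. The file name, host, byte count, and timeout are illustrative;
+      the write timeout also bounds each blocked step inside transferToFully.
+
+      import java.io.FileInputStream;
+      import java.net.InetSocketAddress;
+      import java.nio.channels.FileChannel;
+      import java.nio.channels.SocketChannel;
+      import org.apache.hadoop.net.SocketOutputStream;
+
+      SocketChannel ch = SocketChannel.open(new InetSocketAddress("host", 9000));
+      SocketOutputStream out = new SocketOutputStream(ch.socket(), 5000);
+      FileChannel fileCh = new FileInputStream("/tmp/block.dat").getChannel();
+      // Sends exactly 65536 bytes starting at offset 0, waiting as needed.
+      out.transferToFully(fileCh, 0L, 65536);
+      out.close();
+ -->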
+ <!-- start class org.apache.hadoop.net.SocksSocketFactory -->
+ <class name="SocksSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="SocksSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <constructor name="SocksSocketFactory" type="java.net.Proxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with a supplied Proxy
+
+ @param proxy the proxy to use to create sockets]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create sockets with a SOCKS proxy.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocksSocketFactory -->
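+ <!-- Editor's note: a minimal sketch of SocksSocketFactory, not part of the
+      generated API record. The proxy and target host names and ports are
+      illustrative values.
+
+      import java.net.InetSocketAddress;
+      import java.net.Proxy;
+      import java.net.Socket;
+      import org.apache.hadoop.net.SocksSocketFactory;
+
+      Proxy proxy = new Proxy(Proxy.Type.SOCKS,
+                              new InetSocketAddress("socks.example.com", 1080));
+      SocksSocketFactory factory = new SocksSocketFactory(proxy);
+      Socket s = factory.createSocket("namenode.example.com", 9000);
+ -->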
+ <!-- start class org.apache.hadoop.net.StandardSocketFactory -->
+ <class name="StandardSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StandardSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create standard (direct) sockets.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.StandardSocketFactory -->
+</package>
+<package name="org.apache.hadoop.record">
+ <!-- start class org.apache.hadoop.record.BinaryRecordInput -->
+ <class name="BinaryRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="BinaryRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordInput" type="java.io.DataInput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inp" type="java.io.DataInput"/>
+ <doc>
+ <![CDATA[Get a thread-local record input for the supplied DataInput.
+ @param inp data input stream
+ @return binary record input corresponding to the supplied DataInput.]]>
+ </doc>
+ </method>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordInput -->
+ <!-- start class org.apache.hadoop.record.BinaryRecordOutput -->
+ <class name="BinaryRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="BinaryRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordOutput" type="java.io.DataOutput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <doc>
+ <![CDATA[Get a thread-local record output for the supplied DataOutput.
+ @param out data output stream
+ @return binary record output corresponding to the supplied DataOutput.]]>
+ </doc>
+ </method>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordOutput -->
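+ <!-- Editor's note: a minimal binary round trip, not part of the generated API
+      record. It pairs BinaryRecordOutput with the BinaryRecordInput class above;
+      tags such as "count" are ignored by the binary format but required by the
+      RecordInput/RecordOutput interfaces, and the values are illustrative.
+
+      import java.io.ByteArrayInputStream;
+      import java.io.ByteArrayOutputStream;
+      import java.io.DataInputStream;
+      import java.io.DataOutputStream;
+      import org.apache.hadoop.record.BinaryRecordInput;
+      import org.apache.hadoop.record.BinaryRecordOutput;
+
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      BinaryRecordOutput rout = BinaryRecordOutput.get(new DataOutputStream(bytes));
+      rout.writeInt(42, "count");
+      rout.writeString("alice", "name");
+
+      BinaryRecordInput rin = BinaryRecordInput.get(
+          new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
+      int count = rin.readInt("count");      // 42
+      String name = rin.readString("name");  // "alice"
+ -->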
+ <!-- start class org.apache.hadoop.record.Buffer -->
+ <class name="Buffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Buffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-count sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte array as the initial value.
+
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[], int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte range as the initial value.
+
+ @param bytes A copy of this array becomes the backing storage for the object.
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Use the specified byte array as the underlying sequence.
+
+ @param bytes byte sequence]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Copy the specified byte array to the Buffer. Replaces the current buffer.
+
+ @param bytes byte array to be assigned
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the Buffer.
+
+ @return The backing byte array; only indices 0 through getCount() - 1 hold valid data.]]>
+ </doc>
+ </method>
+ <method name="getCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current count of the buffer.]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum count that could be handled
+ without resizing the backing storage.
+
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newCapacity" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved if newCapacity >= getCount().
+ @param newCapacity The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the buffer to size 0.]]>
+ </doc>
+ </method>
+ <method name="truncate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Change the capacity of the backing store to be the same as the current
+ count of the buffer.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Append the specified bytes to the buffer.
+
+ @param bytes byte array to be appended
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Append the specified bytes to the buffer.
+
+ @param bytes byte array to be appended]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Define the sort order of the Buffer.
+
+ @param other The other buffer
+ @return Positive if this is bigger than other, 0 if they are equal, and
+ negative if this is smaller than other.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="charsetName" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ <doc>
+ <![CDATA[Convert the byte buffer to a string using a specific character encoding.
+
+ @param charsetName A valid Java character set name]]>
+ </doc>
+ </method>
+ <method name="clone" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="CloneNotSupportedException" type="java.lang.CloneNotSupportedException"/>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is used as a Java native type for buffer.
+ It is resizable and distinguishes between the count of the sequence and
+ the current capacity.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Buffer -->
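+ <!-- Editor's note: a minimal Buffer sketch, not part of the generated API record.
+      Note that get() returns the backing array itself, whose valid bytes are the
+      range 0 to getCount() - 1; the sample data is illustrative.
+
+      import org.apache.hadoop.record.Buffer;
+
+      Buffer buf = new Buffer();
+      byte[] data = {1, 2, 3, 4};
+      buf.append(data);               // count becomes 4
+      buf.append(data, 1, 2);         // appends bytes 2 and 3; count becomes 6
+      int count = buf.getCount();     // 6
+      byte[] backing = buf.get();     // valid up to index count - 1
+      buf.truncate();                 // capacity shrinks to the current count
+ -->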
+ <!-- start class org.apache.hadoop.record.CsvRecordInput -->
+ <class name="CsvRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="CsvRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordInput -->
+ <!-- start class org.apache.hadoop.record.CsvRecordOutput -->
+ <class name="CsvRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="CsvRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordOutput -->
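+ <!-- Editor's note: a minimal CSV sketch, not part of the generated API record.
+      Unlike the binary format, the CSV form is textual, so it can be inspected
+      directly; CsvRecordInput (above) reads the same stream back. Field values
+      are illustrative.
+
+      import java.io.ByteArrayOutputStream;
+      import org.apache.hadoop.record.CsvRecordOutput;
+
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      CsvRecordOutput rout = new CsvRecordOutput(bytes);
+      rout.writeInt(42, "count");
+      rout.writeString("alice", "name");
+      String csv = bytes.toString();  // comma-separated textual form
+ -->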
+ <!-- start interface org.apache.hadoop.record.Index -->
+ <interface name="Index" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="done" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="incr"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Interface that acts as an iterator for deserializing vectors and maps.
+ The deserializer returns an instance that the record uses to
+ read vectors and maps. An example of usage is as follows:
+
+ <code>
+ Index idx = startVector(...);
+ while (!idx.done()) {
+ .... // read element of a vector
+ idx.incr();
+ }
+ </code>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.Index -->
+ <!-- start class org.apache.hadoop.record.Record -->
+ <class name="Record" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Record"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="serialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record with a tag (usually the field name).
+ @param rout Record output destination
+ @param tag record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record with a tag (usually the field name).
+ @param rin Record input source
+ @param tag Record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record without a tag
+ @param rout Record output destination]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record without a tag
+ @param rin Record input source]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="din" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Abstract class that is extended by generated classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Record -->
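+ <!-- Editor's note: a hedged sketch of using a generated Record subclass, not part
+      of the generated API record. MyRecord is a hypothetical class produced by the
+      record compiler; only the serialize/write/readFields calls shown here come
+      from the Record base class above.
+
+      import java.io.ByteArrayOutputStream;
+      import java.io.DataOutputStream;
+      import org.apache.hadoop.record.BinaryRecordOutput;
+
+      MyRecord rec = new MyRecord();            // hypothetical generated class
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      rec.serialize(BinaryRecordOutput.get(new DataOutputStream(bytes)));
+      // Record also implements Writable, so write(DataOutput) and
+      // readFields(DataInput) work with standard Hadoop serialization.
+ -->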
+ <!-- start class org.apache.hadoop.record.RecordComparator -->
+ <class name="RecordComparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordComparator" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparable&gt;"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a raw {@link Record} comparison implementation.]]>
+ </doc>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.record.RecordComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link Record} implementation.
+
+ @param c record class for which a raw comparator is provided
+ @param comparator Raw comparator instance for class c]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A raw record comparator base class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.RecordComparator -->
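+ <!-- Editor's note: a hedged registration sketch for RecordComparator, not part of
+      the generated API record. MyRecord and MyRecordComparator are hypothetical;
+      a real subclass implements compare(byte[], int, int, byte[], int, int) over
+      the serialized form.
+
+      import org.apache.hadoop.record.RecordComparator;
+
+      // Registers the raw comparator once, typically in a static initializer of
+      // the generated class.
+      RecordComparator.define(MyRecord.class, new MyRecordComparator());
+ -->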
+ <!-- start interface org.apache.hadoop.record.RecordInput -->
+ <interface name="RecordInput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a byte from the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a boolean from the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an integer from the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a long integer from the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a single-precision float from the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a double-precision number from the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string from the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a byte array from the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of elements.]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of map entries.]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the deserializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordInput -->
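+ <!-- Illustrative sketch (not part of the generated API description): reading
+ fields back through the RecordInput interface above. It assumes a concrete
+ deserializer such as BinaryRecordInput over a java.io.DataInput; the record
+ and field tags are hypothetical, and reads must occur in the same order the
+ fields were written.
+
+ RecordInput rin = new BinaryRecordInput(dataInput);
+ rin.startRecord("employee");
+ int id = rin.readInt("id");
+ String name = rin.readString("name");
+ rin.endRecord("employee");
+ -->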
+ <!-- start interface org.apache.hadoop.record.RecordOutput -->
+ <interface name="RecordOutput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a byte to serialized record.
+ @param b Byte to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a boolean to serialized record.
+ @param b Boolean to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write an integer to serialized record.
+ @param i Integer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a long integer to serialized record.
+ @param l Long to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a single-precision float to serialized record.
+ @param f Float to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a double precision floating point number to serialized record.
+ @param d Double to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a Unicode string to serialized record.
+ @param s String to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a buffer to serialized record.
+ @param buf Buffer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a record to be serialized.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized record.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a vector to be serialized.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized vector.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a map to be serialized.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized map.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the serializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordOutput -->
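+ <!-- Companion sketch for the serializer side: writing the same hypothetical
+ record through RecordOutput, assuming a concrete implementation such as
+ BinaryRecordOutput over a java.io.DataOutput. Note that startRecord and
+ endRecord take the Record instance plus a tag, while the field writers take
+ the value plus a tag.
+
+ RecordOutput rout = new BinaryRecordOutput(dataOutput);
+ rout.startRecord(rec, "employee");
+ rout.writeInt(42, "id");
+ rout.writeString("Ada", "name");
+ rout.endRecord(rec, "employee");
+ -->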
+ <!-- start class org.apache.hadoop.record.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array with the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+ <![CDATA[Get the encoded length of an integer stored in a variable-length format.
+ @return the encoded length]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ long is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the long that
+ follows is positive and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the long that
+ follows is negative and the number of bytes that follow is -(v+120).
+ Bytes are stored with the highest non-zero byte first.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
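+ <!-- Minimal sketch of the zero-compressed encoding documented above. It
+ mirrors the stated contract rather than the exact Hadoop source: values in
+ [-112, 127] take a single byte; otherwise a length byte encodes sign and
+ byte count, followed by the magnitude, highest non-zero byte first.
+
+ static void writeVLongSketch(java.io.DataOutput stream, long i)
+     throws java.io.IOException {
+   if (i >= -112 && i <= 127) {
+     stream.writeByte((byte) i);          // single-byte case
+     return;
+   }
+   int len = -112;
+   if (i < 0) {
+     i ^= -1L;                            // one's complement of negatives
+     len = -120;                          // length-byte range for negatives
+   }
+   for (long tmp = i; tmp != 0; tmp = tmp >>> 8) {
+     len = len - 1;                       // count magnitude bytes
+   }
+   stream.writeByte((byte) len);          // v in [-113,-120] or [-121,-128]
+   int count = (len < -120) ? -(len + 120) : -(len + 112);
+   for (int idx = count; idx != 0; idx = idx - 1) {
+     int shift = (idx - 1) * 8;
+     stream.writeByte((byte) ((i >>> shift) & 0xFF));
+   }
+ }
+ -->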
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an int to a binary stream with zero-compressed encoding.
+
+ @param stream Binary output stream
+ @param i int to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <field name="hexchars" type="char[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Various utility functions for the Hadoop record I/O runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Utils -->
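+ <!-- Usage sketch for the variable-length helpers on this class: encode a
+ VInt into a byte array and read it back. The value 300 is arbitrary; both
+ calls can throw java.io.IOException.
+
+ java.io.ByteArrayOutputStream bos = new java.io.ByteArrayOutputStream();
+ Utils.writeVInt(new java.io.DataOutputStream(bos), 300);
+ byte[] encoded = bos.toByteArray();
+ int decoded = Utils.readVInt(encoded, 0);     // 300 again
+ assert Utils.getVIntSize(300) == encoded.length;
+ -->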
+ <!-- start class org.apache.hadoop.record.XmlRecordInput -->
+ <class name="XmlRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="XmlRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Deserializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordInput -->
+ <!-- start class org.apache.hadoop.record.XmlRecordOutput -->
+ <class name="XmlRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="XmlRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Serializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordOutput -->
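+ <!-- Round-trip sketch for the two XML classes above. The Record instances
+ rec and copy are hypothetical; this assumes the serialize/deserialize
+ methods that org.apache.hadoop.record.Record types provide.
+
+ java.io.ByteArrayOutputStream bos = new java.io.ByteArrayOutputStream();
+ XmlRecordOutput xout = new XmlRecordOutput(bos);
+ rec.serialize(xout, "rec");
+ XmlRecordInput xin = new XmlRecordInput(
+     new java.io.ByteArrayInputStream(bos.toByteArray()));
+ copy.deserialize(xin, "rec");
+ -->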
+</package>
+<package name="org.apache.hadoop.record.compiler">
+ <!-- start class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <class name="CodeBuffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A wrapper around StringBuffer that automatically does indentation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.Consts -->
+ <class name="Consts" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="RIO_PREFIX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_VAR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER_FIELDS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_OUTPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_INPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TAG" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Constant definitions for the Record I/O compiler.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.Consts -->
+ <!-- start class org.apache.hadoop.record.compiler.JBoolean -->
+ <class name="JBoolean" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBoolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBoolean]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBoolean -->
+ <!-- start class org.apache.hadoop.record.compiler.JBuffer -->
+ <class name="JBuffer" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBuffer]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "buffer" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.JByte -->
+ <class name="JByte" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JByte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "byte" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JByte -->
+ <!-- start class org.apache.hadoop.record.compiler.JDouble -->
+ <class name="JDouble" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JDouble"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JDouble]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JDouble -->
+ <!-- start class org.apache.hadoop.record.compiler.JField -->
+ <class name="JField" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JField" type="java.lang.String, T"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JField]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[A thin wrapper around a record field.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JField -->
+ <!-- start class org.apache.hadoop.record.compiler.JFile -->
+ <class name="JFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFile" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JFile&gt;, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFile
+
+ @param name possibly full pathname to the file
+ @param inclFiles included files (as JFile)
+ @param recList List of records defined within this file]]>
+ </doc>
+ </constructor>
+ <method name="genCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <param name="destDir" type="java.lang.String"/>
+ <param name="options" type="java.util.ArrayList&lt;java.lang.String&gt;"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate record code in given language. Language should be all
+ lowercase.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Container for the Hadoop Record DDL.
+ The main components of the file are filename, list of included files,
+ and records defined in that file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFile -->
+ <!-- start class org.apache.hadoop.record.compiler.JFloat -->
+ <class name="JFloat" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFloat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFloat]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFloat -->
+ <!-- start class org.apache.hadoop.record.compiler.JInt -->
+ <class name="JInt" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JInt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JInt]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "int" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JInt -->
+ <!-- start class org.apache.hadoop.record.compiler.JLong -->
+ <class name="JLong" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JLong"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JLong]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "long" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JLong -->
+ <!-- start class org.apache.hadoop.record.compiler.JMap -->
+ <class name="JMap" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JMap" type="org.apache.hadoop.record.compiler.JType, org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JMap]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JMap -->
+ <!-- start class org.apache.hadoop.record.compiler.JRecord -->
+ <class name="JRecord" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JRecord" type="java.lang.String, java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JRecord]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JRecord -->
+ <!-- start class org.apache.hadoop.record.compiler.JString -->
+ <class name="JString" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JString"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JString]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JString -->
+ <!-- start class org.apache.hadoop.record.compiler.JType -->
+ <class name="JType" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Abstract Base class for all types supported by Hadoop Record I/O.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JType -->
+ <!-- start class org.apache.hadoop.record.compiler.JVector -->
+ <class name="JVector" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JVector" type="org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JVector]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JVector -->
+</package>
+<package name="org.apache.hadoop.record.compiler.ant">
+ <!-- start class org.apache.hadoop.record.compiler.ant.RccTask -->
+ <class name="RccTask" extends="org.apache.tools.ant.Task"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RccTask"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of RccTask]]>
+ </doc>
+ </constructor>
+ <method name="setLanguage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the output language option
+ @param language "java"/"c++"]]>
+ </doc>
+ </method>
+ <method name="setFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets the record definition file attribute
+ @param file record definition file]]>
+ </doc>
+ </method>
+ <method name="setFailonerror"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="flag" type="boolean"/>
+ <doc>
+ <![CDATA[Given multiple files (via fileset), sets the error handling behavior.
+ @param flag if true, throw a build exception in case of failure (default)]]>
+ </doc>
+ </method>
+ <method name="setDestdir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets directory where output files will be generated
+ @param dir output directory]]>
+ </doc>
+ </method>
+ <method name="addFileset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="set" type="org.apache.tools.ant.types.FileSet"/>
+ <doc>
+ <![CDATA[Adds a fileset that can consist of one or more files
+ @param set Set of record definition files]]>
+ </doc>
+ </method>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="BuildException" type="org.apache.tools.ant.BuildException"/>
+ <doc>
+ <![CDATA[Invoke the Hadoop record compiler on each record definition file]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Hadoop record compiler Ant task.
+ <p> This task takes the given record definition files and compiles them into
+ Java or C++ files. It is then up to the user to compile the generated files.
+
+ <p> The task requires the <code>file</code> or the nested fileset element to be
+ specified. Optional attributes are <code>language</code> (set the output
+ language, default is "java"),
+ <code>destdir</code> (name of the destination directory for generated java/c++
+ code, default is ".") and <code>failonerror</code> (specifies error handling
+ behavior; default is true).
+ <p><h4>Usage</h4>
+ <pre>
+ &lt;recordcc
+ destdir="${basedir}/gensrc"
+ language="java"&gt;
+ &lt;fileset include="**\/*.jr" /&gt;
+ &lt;/recordcc&gt;
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.ant.RccTask -->
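+ <!-- For the usage example above to work, the task must first be declared in
+ the build file with a standard Ant taskdef. The task name "recordcc"
+ matches the example; the classpath reference is an assumption:
+
+ <taskdef name="recordcc"
+          classname="org.apache.hadoop.record.compiler.ant.RccTask"
+          classpathref="hadoop.classpath"/>
+ -->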
+</package>
+<package name="org.apache.hadoop.record.compiler.generated">
+ <!-- start class org.apache.hadoop.record.compiler.generated.ParseException -->
+ <class name="ParseException" extends="java.lang.Exception"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ParseException" type="org.apache.hadoop.record.compiler.generated.Token, int[][], java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constructor is used by the method "generateParseException"
+ in the generated parser. Calling this constructor generates
+ a new object of this type with the fields "currentToken",
+ "expectedTokenSequences", and "tokenImage" set. The boolean
+ flag "specialConstructor" is also set to true to indicate that
+ this constructor was used to create this object.
+ This constructor calls its super class with the empty string
+ to force the "toString" method of parent class "Throwable" to
+ print the error message in the form:
+ ParseException: <result of getMessage>]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The following constructors are for your own use, for whatever
+ purpose you can think of.
+ manner makes the exception behave in the normal way - i.e., as
+ documented in the class "Throwable". The fields "errorToken",
+ "expectedTokenSequences", and "tokenImage" do not contain
+ relevant information. The JavaCC generated code does not use
+ these constructors.]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method has the standard behavior when this object has been
+ created using the standard constructors. Otherwise, it uses
+ "currentToken" and "expectedTokenSequences" to generate a parse
+ error message and returns it. If this object has been created
+ due to a parse error, and you do not catch it (it gets thrown
+ from the parser), then this method is called during the printing
+ of the final stack trace, and hence the correct error message
+ gets displayed.]]>
+ </doc>
+ </method>
+ <method name="add_escapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Used to convert raw characters to their escaped versions
+ when the raw versions cannot be used as part of an ASCII
+ string literal.]]>
+ </doc>
+ </method>
+ <field name="specialConstructor" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This variable determines which constructor was used to create
+ this object and thereby affects the semantics of the
+ "getMessage" method (see below).]]>
+ </doc>
+ </field>
+ <field name="currentToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is the last token that has been consumed successfully. If
+ this object has been created due to a parse error, the token
+ following this token will (therefore) be the first error token.]]>
+ </doc>
+ </field>
+ <field name="expectedTokenSequences" type="int[][]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Each entry in this array is an array of integers. Each array
+ of integers represents a sequence of tokens (by their ordinal
+ values) that is expected at this point of the parse.]]>
+ </doc>
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is a reference to the "tokenImage" array of the generated
+ parser within which the parse error occurred. This array is
+ defined in the generated ...Constants interface.]]>
+ </doc>
+ </field>
+ <field name="eol" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The end of line string for this machine.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This exception is thrown when parse errors are encountered.
+ You can explicitly create objects of this exception type by
+ calling the method generateParseException in the generated
+ parser.
+
+ You can modify this class to customize your error reporting
+ mechanisms so long as you retain the public fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.ParseException -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.Rcc -->
+ <class name="Rcc" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="Rcc" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="usage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="driver" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="Input" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Include" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Module" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ModuleName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="RecordList" return="java.util.ArrayList&lt;org.apache.hadoop.record.compiler.JRecord&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Record" return="org.apache.hadoop.record.compiler.JRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Field" return="org.apache.hadoop.record.compiler.JField&lt;org.apache.hadoop.record.compiler.JType&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Type" return="org.apache.hadoop.record.compiler.JType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Map" return="org.apache.hadoop.record.compiler.JMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Vector" return="org.apache.hadoop.record.compiler.JVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tm" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"/>
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <method name="generateParseException" return="org.apache.hadoop.record.compiler.generated.ParseException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="enable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="disable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="token_source" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="token" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jj_nt" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Rcc -->
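+ <!-- Programmatic sketch tying the parser to the compiler classes documented
+ earlier: parse a hypothetical DDL file and generate Java code into the
+ current directory. Input() throws ParseException on malformed input.
+
+ Rcc parser = new Rcc(new java.io.FileInputStream("employee.jr"));
+ org.apache.hadoop.record.compiler.JFile jfile = parser.Input();
+ jfile.genCode("java", ".", new java.util.ArrayList<java.lang.String>());
+ -->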
+ <!-- start interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <interface name="RccConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="EOF" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MODULE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCLUDE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BOOLEAN_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SEMICOLON_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CSTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IDENT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinOneLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinMultiLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
+ <class name="RccTokenManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setDebugStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ds" type="java.io.PrintStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="SwitchTo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="jjFillToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="debugStream" type="java.io.PrintStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjstrLiteralImages" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="lexStateNames" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjnewLexState" type="int[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="input_stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="curChar" type="char"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
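+ <!-- A minimal usage sketch for RccTokenManager (not part of the generated listing;
+      assumes kind 0 is the JavaCC EOF token, as in generated *Constants interfaces):
+
+      import java.io.StringReader;
+      import org.apache.hadoop.record.compiler.generated.RccConstants;
+      import org.apache.hadoop.record.compiler.generated.RccTokenManager;
+      import org.apache.hadoop.record.compiler.generated.SimpleCharStream;
+      import org.apache.hadoop.record.compiler.generated.Token;
+
+      public class RccLexDemo {
+        public static void main(String[] args) {
+          SimpleCharStream cs = new SimpleCharStream(new StringReader("module demo;"));
+          RccTokenManager tm = new RccTokenManager(cs);
+          // getNextToken() returns one Token per lexeme until EOF (kind 0, assumed)
+          for (Token t = tm.getNextToken(); t.kind != 0; t = tm.getNextToken()) {
+            System.out.println(RccConstants.tokenImage[t.kind] + " : " + t.image);
+          }
+        }
+      }
+ -->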
+ <!-- start class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
+ <class name="SimpleCharStream" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setTabSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="getTabSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="ExpandBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="wrapAround" type="boolean"/>
+ </method>
+ <method name="FillBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="BeginToken" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="UpdateLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="c" type="char"/>
+ </method>
+ <method name="readChar" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getEndColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getEndLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="backup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="amount" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="GetImage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="GetSuffix" return="char[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="int"/>
+ </method>
+ <method name="Done"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="adjustBeginLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newLine" type="int"/>
+ <param name="newCol" type="int"/>
+ <doc>
+ <![CDATA[Method to adjust line and column numbers for the start of a token.]]>
+ </doc>
+ </method>
+ <field name="staticFlag" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufpos" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufline" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufcolumn" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="column" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="line" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsCR" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsLF" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputStream" type="java.io.Reader"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="buffer" type="char[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="maxNextCharInd" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inBuf" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="tabSize" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of interface CharStream, where the stream is assumed to
+ contain only ASCII characters (without unicode processing).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
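+ <!-- A small sketch of the SimpleCharStream read/backup protocol (not part of the
+      generated listing; the input text is made up for illustration):
+
+      import java.io.IOException;
+      import java.io.StringReader;
+      import org.apache.hadoop.record.compiler.generated.SimpleCharStream;
+
+      public class CharStreamDemo {
+        public static void main(String[] args) throws IOException {
+          SimpleCharStream cs = new SimpleCharStream(new StringReader("abc"));
+          char first = cs.BeginToken();  // marks the start of a token, returns 'a'
+          char next = cs.readChar();     // 'b'
+          cs.backup(1);                  // push 'b' back into the buffer
+          // GetImage() returns everything read since BeginToken(), here "a"
+          System.out.println(cs.GetImage() + " at line " + cs.getBeginLine());
+        }
+      }
+ -->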
+ <!-- start class org.apache.hadoop.record.compiler.generated.Token -->
+ <class name="Token" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Token"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the image.]]>
+ </doc>
+ </method>
+ <method name="newToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="ofKind" type="int"/>
+ <doc>
+ <![CDATA[Returns a new Token object, by default. However, if you want, you
+ can create and return subclass objects based on the value of ofKind.
+ Simply add the cases to the switch for all those special cases.
+ For example, if you have a subclass of Token called IDToken that
+ you want to create if ofKind is ID, simply add something like:
+
+ case MyParserConstants.ID : return new IDToken();
+
+ to the following switch statement. Then you can cast the matchedToken
+ variable to the appropriate type and use it in your lexical actions.]]>
+ </doc>
+ </method>
+ <field name="kind" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[An integer that describes the kind of this token. This numbering
+ system is determined by JavaCCParser, and a table of these numbers is
+ stored in the file ...Constants.java.]]>
+ </doc>
+ </field>
+ <field name="beginLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="beginColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="image" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The string image of the token.]]>
+ </doc>
+ </field>
+ <field name="next" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A reference to the next regular (non-special) token from the input
+ stream. If this is the last token from the input stream, or if the
+ token manager has not read tokens beyond this one, this field is
+ set to null. This is true only if this token is also a regular
+ token. Otherwise, see below for a description of the contents of
+ this field.]]>
+ </doc>
+ </field>
+ <field name="specialToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This field is used to access special tokens that occur prior to this
+ token, but after the immediately preceding regular (non-special) token.
+ If there are no such special tokens, this field is set to null.
+ When there is more than one such special token, this field refers
+ to the last of these special tokens, which in turn refers to the next
+ previous special token through its specialToken field, and so on
+ until the first special token (whose specialToken field is null).
+ The next fields of special tokens refer to other special tokens that
+ immediately follow them (without an intervening regular token). If there
+ is no such token, this field is null.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Describes the input token stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Token -->
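+ <!-- A sketch of the subclassing hook described in the newToken javadoc above.
+      IDToken and MyParserConstants.ID are hypothetical, exactly as in the javadoc;
+      this is how the generated method body would look after the suggested edit:
+
+      public static final Token newToken(int ofKind) {
+        switch (ofKind) {
+          // case MyParserConstants.ID: return new IDToken();  // hypothetical subclass
+          default: return new Token();
+        }
+      }
+ -->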
+ <!-- start class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
+ <class name="TokenMgrError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TokenMgrError"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="boolean, int, int, int, java.lang.String, char, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addEscapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Replaces unprintable characters by their escaped (or unicode escaped)
+ equivalents in the given string.]]>
+ </doc>
+ </method>
+ <method name="LexicalError" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="EOFSeen" type="boolean"/>
+ <param name="lexState" type="int"/>
+ <param name="errorLine" type="int"/>
+ <param name="errorColumn" type="int"/>
+ <param name="errorAfter" type="java.lang.String"/>
+ <param name="curChar" type="char"/>
+ <doc>
+ <![CDATA[Returns a detailed message for the Error when it is thrown by the
+ token manager to indicate a lexical error.
+ Parameters :
+ EOFSeen : indicates if EOF caused the lexical error
+ curLexState : lexical state in which this error occurred
+ errorLine : line number when the error occurred
+ errorColumn : column number when the error occurred
+ errorAfter : prefix that was seen before this error occurred
+ curChar : the offending character
+ Note: You can customize the lexical error message by modifying this method.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[You can also modify the body of this method to customize your error messages.
+ For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not
+ of concern to end users, so you can return something like:
+
+ "Internal Error : Please file a bug report .... "
+
+ from this method for such cases in the release version of your parser.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
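+ <!-- TokenMgrError extends java.lang.Error, so it is unchecked; a caller that wants
+      to survive lexical errors must catch it explicitly. A minimal fragment (tm is
+      a RccTokenManager as in the sketch further above):
+
+      try {
+        Token t = tm.getNextToken();
+      } catch (TokenMgrError e) {
+        // getMessage() carries the detailed lexical-error text built by LexicalError()
+        System.err.println("lexical error: " + e.getMessage());
+      }
+ -->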
+</package>
+<package name="org.apache.hadoop.record.meta">
+ <!-- start class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <class name="FieldTypeInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's TypeID object]]>
+ </doc>
+ </method>
+ <method name="getFieldID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's id (name)]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two FieldTypeInfos are equal if each of their fields matches.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ti" type="org.apache.hadoop.record.meta.FieldTypeInfo"/>
+ </method>
+ <doc>
+ <![CDATA[Represents a type information for a field, which is made up of its
+ ID (name) and its type (a TypeID object).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <!-- start class org.apache.hadoop.record.meta.MapTypeID -->
+ <class name="MapTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapTypeID" type="org.apache.hadoop.record.meta.TypeID, org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getKeyTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's key element]]>
+ </doc>
+ </method>
+ <method name="getValueTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's value element]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two map typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a Map]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.MapTypeID -->
+ <!-- start class org.apache.hadoop.record.meta.RecordTypeInfo -->
+ <class name="RecordTypeInfo" extends="org.apache.hadoop.record.Record"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty RecordTypeInfo object.]]>
+ </doc>
+ </constructor>
+ <constructor name="RecordTypeInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a RecordTypeInfo object representing a record with the given name
+ @param name Name of the record]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the name of the record]]>
+ </doc>
+ </method>
+ <method name="setName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[set the name of the record]]>
+ </doc>
+ </method>
+ <method name="addField"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fieldName" type="java.lang.String"/>
+ <param name="tid" type="org.apache.hadoop.record.meta.TypeID"/>
+ <doc>
+ <![CDATA[Add a field.
+ @param fieldName Name of the field
+ @param tid Type ID of the field]]>
+ </doc>
+ </method>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a collection of field type infos]]>
+ </doc>
+ </method>
+ <method name="getNestedStructTypeInfo" return="org.apache.hadoop.record.meta.RecordTypeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the type info of a nested record. We only consider nesting
+ to one level.
+ @param name Name of the nested record]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer_" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ <doc>
+ <![CDATA[Not implemented: this class is not meant to be used for anything
+ besides de/serializing, so ordering is meaningless. A ClassCastException is
+ thrown if the argument is not a RecordTypeInfo; otherwise 0 is always returned.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A record's Type Information object which can read/write itself.
+
+ Type information for a record comprises metadata about the record,
+ as well as a collection of type information for each field in the record.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.RecordTypeInfo -->
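+ <!-- A minimal sketch of building and serializing a RecordTypeInfo (not part of the
+      generated listing; CsvRecordOutput is the CSV serializer from
+      org.apache.hadoop.record, and the record layout here is made up):
+
+      import org.apache.hadoop.record.CsvRecordOutput;
+      import org.apache.hadoop.record.meta.RecordTypeInfo;
+      import org.apache.hadoop.record.meta.TypeID;
+
+      public class TypeInfoDemo {
+        public static void main(String[] args) throws java.io.IOException {
+          RecordTypeInfo rti = new RecordTypeInfo("Employee");
+          rti.addField("name", TypeID.StringTypeID);  // string field
+          rti.addField("id", TypeID.IntTypeID);       // int field
+          // write the type information itself to stdout in CSV form
+          rti.serialize(new CsvRecordOutput(System.out), "Employee");
+        }
+      }
+ -->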
+ <!-- start class org.apache.hadoop.record.meta.StructTypeID -->
+ <class name="StructTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StructTypeID" type="org.apache.hadoop.record.meta.RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a StructTypeID based on the RecordTypeInfo of some record]]>
+ </doc>
+ </constructor>
+ <method name="getFieldTypeInfos" return="java.util.Collection&lt;org.apache.hadoop.record.meta.FieldTypeInfo&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a struct]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.StructTypeID -->
+ <!-- start class org.apache.hadoop.record.meta.TypeID -->
+ <class name="TypeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeVal" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type value. One of the constants in RIOType.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two base typeIDs are equal if they refer to the same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <field name="BoolTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constant instances for the basic types, so we can share them.]]>
+ </doc>
+ </field>
+ <field name="BufferTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ByteTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DoubleTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FloatTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IntTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LongTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="StringTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="typeVal" type="byte"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Represents typeID for basic types.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID -->
+ <!-- start class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <class name="TypeID.RIOType" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TypeID.RIOType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <field name="BOOL" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRUCT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Constants representing the IDL types we support.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <!-- start class org.apache.hadoop.record.meta.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <param name="typeID" type="org.apache.hadoop.record.meta.TypeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[read/skip bytes from stream based on a type]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Various utility functions for the Hadoop record I/O platform.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.Utils -->
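+ <!-- Utils.skip is meant for consumers that read a serialized record with a known
+      TypeID but do not need a particular field's value. A hedged fragment (rin is
+      an org.apache.hadoop.record.RecordInput positioned at the field, and the
+      field name "salary" is made up):
+
+      // skip over an int field instead of deserializing it
+      org.apache.hadoop.record.meta.Utils.skip(rin, "salary", TypeID.IntTypeID);
+ -->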
+ <!-- start class org.apache.hadoop.record.meta.VectorTypeID -->
+ <class name="VectorTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VectorTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getElementTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two vector typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for vector.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.VectorTypeID -->
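+ <!-- The composite TypeIDs above nest freely. A small sketch composing a map from
+      int keys to vectors of strings (class and variable names are illustrative):
+
+      import org.apache.hadoop.record.meta.MapTypeID;
+      import org.apache.hadoop.record.meta.TypeID;
+      import org.apache.hadoop.record.meta.VectorTypeID;
+
+      public class CompositeTypeDemo {
+        public static void main(String[] args) {
+          TypeID listOfStrings = new VectorTypeID(TypeID.StringTypeID);
+          TypeID intToLists = new MapTypeID(TypeID.IntTypeID, listOfStrings);
+          // equals() on composite TypeIDs compares constituent element types
+          System.out.println(intToLists.equals(
+              new MapTypeID(TypeID.IntTypeID, new VectorTypeID(TypeID.StringTypeID))));
+        }
+      }
+ -->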
+</package>
+<package name="org.apache.hadoop.security">
+ <!-- start class org.apache.hadoop.security.AccessControlException -->
+ <class name="AccessControlException" extends="org.apache.hadoop.fs.permission.AccessControlException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AccessControlException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor is needed for unwrapping from
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[An exception class for access control related issues.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.AccessControlException -->
+ <!-- start class org.apache.hadoop.security.UnixUserGroupInformation -->
+ <class name="UnixUserGroupInformation" extends="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnixUserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameters user name and its group names.
+ The first entry in the groups list is the default group.
+
+ @param userName a user's name
+ @param groupNames groups list, first of which is the default group
+ @exception IllegalArgumentException if any argument is null]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with parameter user/group names
+
+ @param ugi an array containing user/group names, the first
+ element of which is the user name, the second of
+ which is the default group name.
+ @exception IllegalArgumentException if the array size is less than 2
+ or any element is null.]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Create an immutable {@link UnixUserGroupInformation} object.]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an array of group names]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the user's name]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize this object.
+ First check whether this is a UGI in the string format;
+ if not, throw an IOException. Otherwise,
+ set this object's fields by reading them from the given data input.
+
+ @param in input stream
+ @exception IOException if any error is encountered while reading]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize this object.
+ First write a string marking that this is a UGI in the string format,
+ then write this object's serialized form to the given data output.
+
+ @param out output stream
+ @exception IOException if any error is encountered during writing]]>
+ </doc>
+ </method>
+ <method name="saveToConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <param name="ugi" type="org.apache.hadoop.security.UnixUserGroupInformation"/>
+ <doc>
+ <![CDATA[Store the given <code>ugi</code> as a comma-separated string in
+ <code>conf</code> under the property <code>attr</code>.
+
+ The string starts with the user name, followed by the default group name
+ and then the other group names.
+
+ @param conf configuration
+ @param attr property name
+ @param ugi a UnixUserGroupInformation]]>
+ </doc>
+ </method>
+ <method name="readFromConf" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Read a UGI from the given <code>conf</code>.
+
+ The object is expected to be stored under the property name <code>attr</code>
+ as a comma-separated string that starts
+ with the user name followed by group names.
+ If the property name is not defined, return null.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the ugi in the map.
+ Otherwise, construct a UGI from the configuration, store it in the
+ ugi map and return it.
+
+ @param conf configuration
+ @param attr property name
+ @return a UnixUGI
+ @throws LoginException if the stored string is ill-formatted.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Get current user's name and the names of all its groups from Unix.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the ugi in the map.
+ Otherwise get the current user's information from Unix, store it
+ in the map, and return it.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Equivalent to login(conf, false).]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="save" type="boolean"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Get a user's name and its group names from the given configuration;
+ if they are not defined in the configuration, get the current user's
+ information from Unix.
+ If the user has a UGI in the ugi map, return the one in
+ the UGI map.
+
+ @param conf either a job configuration or client's configuration
+ @param save whether to save the information back to <code>conf</code>
+ @return UnixUserGroupInformation a user/group information
+ @exception LoginException if not able to get the user/group information]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Decide if two UGIs are the same
+
+ @param other other object
+ @return true if they are the same; false otherwise.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code for this UGI.
+ The hash code for a UGI is the hash code of its user name string.
+
+ @return a hash code value for this UGI.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this object to a string
+
+ @return a comma separated string containing the user name and group names]]>
+ </doc>
+ </method>
+ <field name="UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of UserGroupInformation in the Unix system]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UnixUserGroupInformation -->
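+ <!-- A round-trip sketch for the saveToConf/readFromConf pair documented above
+      (assumes a Unix login is available to the JVM):
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.security.UnixUserGroupInformation;
+
+      public class UgiDemo {
+        public static void main(String[] args)
+            throws javax.security.auth.login.LoginException {
+          UnixUserGroupInformation ugi = UnixUserGroupInformation.login();
+          Configuration conf = new Configuration();
+          // stored as "user,group1,group2,..." under the given property name
+          UnixUserGroupInformation.saveToConf(conf,
+              UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
+          UnixUserGroupInformation back = UnixUserGroupInformation.readFromConf(conf,
+              UnixUserGroupInformation.UGI_PROPERTY_NAME);
+          System.out.println(ugi.equals(back));  // expected: true
+        }
+      }
+ -->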
+ <!-- start class org.apache.hadoop.security.UserGroupInformation -->
+ <class name="UserGroupInformation" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="UserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCurrentUGI" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="setCurrentUGI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <doc>
+ <![CDATA[Set the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get username
+
+ @return the user's name]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the names of the groups that the user belongs to.
+
+ @return an array of group names]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Login and return a UserGroupInformation object.]]>
+ </doc>
+ </method>
+ <method name="readFrom" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link UserGroupInformation} from conf]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Writable} abstract class for storing user and groups information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UserGroupInformation -->
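+ <!-- A sketch of the thread-local usage pattern described above: log in once, bind
+      the UGI to the current thread, then read it back anywhere on that thread:
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.security.UserGroupInformation;
+
+      public class LoginDemo {
+        public static void main(String[] args)
+            throws javax.security.auth.login.LoginException {
+          UserGroupInformation ugi = UserGroupInformation.login(new Configuration());
+          UserGroupInformation.setCurrentUGI(ugi);  // bind to the current thread
+          System.out.println(UserGroupInformation.getCurrentUGI().getUserName());
+        }
+      }
+ -->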
+</package>
+<package name="org.apache.hadoop.tools">
+ <!-- start class org.apache.hadoop.tools.DistCp -->
+ <class name="DistCp" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="DistCp" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="destPath" type="java.lang.String"/>
+ <param name="logPath" type="org.apache.hadoop.fs.Path"/>
+ <param name="srcAsList" type="boolean"/>
+ <param name="ignoreReadFailures" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[This is the main driver for recursively copying directories
+ across file systems. It takes at least two command-line parameters: a source
+ URL and a destination URL. It then essentially does an "ls -lR" on the
+ source URL, and writes the output in a round-robin manner to all the map
+ input files. The mapper actually copies the files allotted to it. The
+ reduce is empty.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getRandomId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Map-reduce program to recursively copy directories between
+ different file-systems.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp -->
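+ <!-- DistCp implements Tool, so the usual programmatic entry point is ToolRunner.
+      A hedged sketch (the HDFS URLs are placeholders):
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.tools.DistCp;
+      import org.apache.hadoop.util.ToolRunner;
+
+      public class DistCpDemo {
+        public static void main(String[] args) throws Exception {
+          // run() copies everything under the source URL to the destination URL
+          int rc = ToolRunner.run(new DistCp(new Configuration()),
+              new String[] { "hdfs://nn1:8020/src", "hdfs://nn2:8020/dest" });
+          System.exit(rc);
+        }
+      }
+ -->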
+ <!-- start class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <class name="DistCp.DuplicationException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="ERROR_CODE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Error code for this exception]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An exception class for duplicated source files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <!-- start class org.apache.hadoop.tools.HadoopArchives -->
+ <class name="HadoopArchives" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="HadoopArchives" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="archive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPaths" type="java.util.List&lt;org.apache.hadoop.fs.Path&gt;"/>
+ <param name="archiveName" type="java.lang.String"/>
+ <param name="dest" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Archive the given source paths into
+ the destination.
+ @param srcPaths the source paths to be archived
+ @param archiveName the name of the archive to create
+ @param dest the destination directory that will contain the archive]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[The main driver for creating the archives.
+ It takes at least two command-line parameters: the source and the
+ destination. It does an lsr on the source paths.
+ The mapper creates the archives and the reducer creates
+ the archive index.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[The main function.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An archive creation utility.
+ This class provides methods that can be used
+ to create Hadoop archives. For an understanding of
+ Hadoop archives, see {@link HarFileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.HadoopArchives -->
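+ <!-- Illustrative usage sketch (editor's addition): creating an archive
+      with the archive(List<Path>, String, Path) method recorded above.
+      The paths and the archive name are placeholders.
+
+      import java.util.Collections;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.Path;
+      import org.apache.hadoop.tools.HadoopArchives;
+
+      public class HarExample {
+        public static void main(String[] args) throws Exception {
+          HadoopArchives har = new HadoopArchives(new Configuration());
+          // Packs /user/logs into /user/archives/logs.har; per the run()
+          // doc, mappers write the archives and a reducer writes the index.
+          har.archive(Collections.singletonList(new Path("/user/logs")),
+                      "logs.har", new Path("/user/archives"));
+        }
+      }
+ -->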
+ <!-- start class org.apache.hadoop.tools.Logalyzer -->
+ <class name="Logalyzer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Logalyzer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doArchive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logListURI" type="java.lang.String"/>
+ <param name="archiveDirectory" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doArchive: Workhorse function to archive log-files.
+ @param logListURI : The URI which will serve the list of log-files to archive.
+ @param archiveDirectory : The directory in which to store the archived log-files.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="doAnalyze"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFilesDirectory" type="java.lang.String"/>
+ <param name="outputDirectory" type="java.lang.String"/>
+ <param name="grepPattern" type="java.lang.String"/>
+ <param name="sortColumns" type="java.lang.String"/>
+ <param name="columnSeparator" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doAnalyze:
+ @param inputFilesDirectory : Directory containing the files to be analyzed.
+ @param outputDirectory : Directory to store analysis (output).
+ @param grepPattern : Pattern to *grep* for.
+ @param sortColumns : Sort specification for output.
+ @param columnSeparator : Column separator.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[Logalyzer: A utility tool for archiving and analyzing Hadoop logs.
+ <p>
+ This tool supports archiving and analyzing (sort/grep) of log-files.
+ It takes as input
+ a) an input URI which will serve the URIs of the logs to be archived,
+ b) an output directory (not mandatory),
+ c) a directory on DFS in which to archive the logs, and
+ d) the sort/grep patterns for analyzing the files and the separator for column boundaries.
+ Usage:
+ Logalyzer -archive -archiveDir <directory to archive logs> -analysis <directory> -logs <log-list uri> -grep <pattern> -sort <col1, col2> -separator <separator>
+ <p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <class name="Logalyzer.LogComparator" extends="org.apache.hadoop.io.Text.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Logalyzer.LogComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys of the logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+ <class name="Logalyzer.LogRegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper&lt;K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <constructor name="Logalyzer.LogRegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector&lt;org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable&gt;"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+</package>
+<package name="org.apache.hadoop.util">
+ <!-- start class org.apache.hadoop.util.CyclicIteration -->
+ <class name="CyclicIteration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable&lt;java.util.Map.Entry&lt;K, V&gt;&gt;"/>
+ <constructor name="CyclicIteration" type="java.util.NavigableMap&lt;K, V&gt;, K"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an {@link Iterable} object,
+ so that an {@link Iterator} can be created
+ for iterating the given {@link NavigableMap}.
+ The iteration begins from the starting key exclusively.]]>
+ </doc>
+ </constructor>
+ <method name="iterator" return="java.util.Iterator&lt;java.util.Map.Entry&lt;K, V&gt;&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide a cyclic {@link Iterator} for a {@link NavigableMap}.
+ The {@link Iterator} navigates the entries of the map
+ according to the map's ordering.
+ If the {@link Iterator} hits the last entry of the map,
+ it will then continue from the first entry.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.CyclicIteration -->
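+ <!-- Illustrative sketch (editor's addition): cyclic iteration over a
+      NavigableMap, starting just after a given key. Keys and values are
+      placeholders.
+
+      import java.util.Map;
+      import java.util.TreeMap;
+      import org.apache.hadoop.util.CyclicIteration;
+
+      TreeMap<Integer, String> map = new TreeMap<Integer, String>();
+      map.put(1, "a");  map.put(2, "b");  map.put(3, "c");
+      // Begins after key 2 and wraps past the last entry: 3, then 1, then 2.
+      for (Map.Entry<Integer, String> e
+          : new CyclicIteration<Integer, String>(map, 2)) {
+        System.out.println(e.getKey() + " = " + e.getValue());
+      }
+ -->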
+ <!-- start class org.apache.hadoop.util.Daemon -->
+ <class name="Daemon" extends="java.lang.Thread"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Daemon"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.ThreadGroup, java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread to be part of a specified thread group.]]>
+ </doc>
+ </constructor>
+ <method name="getRunnable" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A thread that has called {@link Thread#setDaemon(boolean) } with true.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Daemon -->
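+ <!-- Illustrative sketch (editor's addition): wrapping a Runnable in a
+      Daemon so the background thread does not keep the JVM alive.
+
+      import org.apache.hadoop.util.Daemon;
+
+      Daemon heartbeat = new Daemon(new Runnable() {
+        public void run() {
+          // periodic background work; the JVM may exit while this runs
+        }
+      });
+      heartbeat.start();  // inherited from java.lang.Thread
+ -->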
+ <!-- start class org.apache.hadoop.util.DataChecksum -->
+ <class name="DataChecksum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.zip.Checksum"/>
+ <method name="newDataChecksum" return="org.apache.hadoop.util.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="int"/>
+ <param name="bytesPerChecksum" type="int"/>
+ </method>
+ <method name="newDataChecksum" return="org.apache.hadoop.util.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <doc>
+ <![CDATA[Creates a DataChecksum from HEADER_LEN bytes starting at bytes[offset].
+ @return DataChecksum of the type in the array, or null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="newDataChecksum" return="org.apache.hadoop.util.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This constructs a DataChecksum by reading HEADER_LEN bytes from the
+ input stream <i>in</i>.]]>
+ </doc>
+ </method>
+ <method name="writeHeader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the checksum header to the output stream <i>out</i>.]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="writeValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="reset" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the current checksum to the stream.
+ If <i>reset</i> is true, then resets the checksum.
+ @return the number of bytes written; will be equal to getChecksumSize().]]>
+ </doc>
+ </method>
+ <method name="writeValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="reset" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the current checksum to a buffer.
+ If <i>reset</i> is true, then resets the checksum.
+ @return the number of bytes written; will be equal to getChecksumSize().]]>
+ </doc>
+ </method>
+ <method name="compare" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <doc>
+ <![CDATA[Compares the checksum located at buf[offset] with the current checksum.
+ @return true if the checksum matches and false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getChecksumType" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getChecksumSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesPerChecksum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumBytesInSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getChecksumHeaderSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getValue" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ </method>
+ <field name="HEADER_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_NULL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_CRC32" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SIZE_OF_INTEGER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class provides an interface and utilities for processing checksums for
+ DFS data transfers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.DataChecksum -->
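+ <!-- Illustrative sketch (editor's addition): computing a CRC32 checksum
+      over a byte range using the factory, fields and accessors above.
+
+      import java.io.IOException;
+      import org.apache.hadoop.util.DataChecksum;
+
+      void checksumChunk(byte[] data) throws IOException {
+        DataChecksum sum =
+            DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);
+        sum.update(data, 0, data.length);
+        // Serialize the checksum and reset for the next chunk; writeValue
+        // returns getChecksumSize() bytes, per its doc above.
+        byte[] buf = new byte[sum.getChecksumSize()];
+        int written = sum.writeValue(buf, 0, true);
+      }
+ -->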
+ <!-- start class org.apache.hadoop.util.DiskChecker -->
+ <class name="DiskChecker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mkdirsWithExistsCheck" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[The semantics of the mkdirsWithExistsCheck method differ from those of the
+ mkdirs method provided in Sun's java.io.File class in the following way:
+ while creating the non-existent parent directories, this method checks for
+ the existence of those directories if the mkdir fails at any point (since
+ that directory might have just been created by some other process).
+ If both mkdir() and the exists() check fail for any seemingly
+ non-existent directory, then we signal an error; Sun's mkdir would signal
+ an error (return false) if a directory it is attempting to create already
+ exists or the mkdir fails.
+ @param dir
+ @return true on success, false on failure]]>
+ </doc>
+ </method>
+ <method name="checkDir"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ </method>
+ <doc>
+ <![CDATA[Class that provides utility functions for checking for disk problems.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker -->
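+ <!-- Illustrative sketch (editor's addition): validating a local directory
+      and handling the DiskErrorException defined below. The path is a
+      placeholder.
+
+      import java.io.File;
+      import org.apache.hadoop.util.DiskChecker;
+
+      try {
+        // checkDir throws DiskErrorException when the directory fails
+        // its disk checks.
+        DiskChecker.checkDir(new File("/data/dfs/local"));
+      } catch (DiskChecker.DiskErrorException e) {
+        // take this volume out of service
+      }
+ -->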
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <class name="DiskChecker.DiskErrorException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskErrorException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <class name="DiskChecker.DiskOutOfSpaceException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskOutOfSpaceException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <!-- start class org.apache.hadoop.util.GenericOptionsParser -->
+ <class name="GenericOptionsParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a <code>GenericOptionsParser</code> to parse only the generic Hadoop
+ arguments.
+
+ The array of string arguments other than the generic arguments can be
+ obtained by {@link #getRemainingArgs()}.
+
+ @param conf the <code>Configuration</code> to modify.
+ @param args command-line arguments.]]>
+ </doc>
+ </constructor>
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, org.apache.commons.cli.Options, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a <code>GenericOptionsParser</code> to parse given options as well
+ as generic Hadoop options.
+
+ The resulting <code>CommandLine</code> object can be obtained by
+ {@link #getCommandLine()}.
+
+ @param conf the configuration to modify
+ @param options options built by the caller
+ @param args User-specified arguments]]>
+ </doc>
+ </constructor>
+ <method name="getRemainingArgs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array of Strings containing only application-specific arguments.
+
+ @return array of <code>String</code>s containing the un-parsed arguments
+ or <strong>empty array</strong> if commandLine was not defined.]]>
+ </doc>
+ </method>
+ <method name="getCommandLine" return="org.apache.commons.cli.CommandLine"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the commons-cli <code>CommandLine</code> object
+ to process the parsed arguments.
+
+ Note: If the object is created with
+ {@link #GenericOptionsParser(Configuration, String[])}, then returned
+ object will only contain parsed generic options.
+
+ @return <code>CommandLine</code> representing list of arguments
+ parsed against Options descriptor.]]>
+ </doc>
+ </method>
+ <method name="getLibJars" return="java.net.URL[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[If libjars are set in the conf, parse the libjars.
+ @param conf
+ @return libjar urls
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Print the usage message for generic command-line options supported.
+
+ @param out stream to print the usage message to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>GenericOptionsParser</code> is a utility to parse command line
+ arguments generic to the Hadoop framework.
+
+ <code>GenericOptionsParser</code> recognizes several standard command-line
+ arguments, enabling applications to easily specify a namenode, a
+ jobtracker, additional configuration resources, etc.
+
+ <h4 id="GenericOptions">Generic Options</h4>
+
+ <p>The supported generic options are:</p>
+ <p><blockquote><pre>
+ -conf &lt;configuration file&gt; specify a configuration file
+ -D &lt;property=value&gt; use value for given property
+ -fs &lt;local|namenode:port&gt; specify a namenode
+ -jt &lt;local|jobtracker:port&gt; specify a job tracker
+ -files &lt;comma separated list of files&gt; specify comma separated
+ files to be copied to the map reduce cluster
+ -libjars &lt;comma separated list of jars&gt; specify comma separated
+ jar files to include in the classpath.
+ -archives &lt;comma separated list of archives&gt; specify comma
+ separated archives to be unarchived on the compute machines.
+
+ </pre></blockquote></p>
+
+ <p>The general command line syntax is:</p>
+ <p><tt><pre>
+ bin/hadoop command [genericOptions] [commandOptions]
+ </pre></tt></p>
+
+ <p>Generic command-line arguments <strong>might</strong> modify
+ <code>Configuration</code> objects given to constructors.</p>
+
+ <p>The functionality is implemented using Commons CLI.</p>
+
+ <p>Examples:</p>
+ <p><blockquote><pre>
+ $ bin/hadoop dfs -fs darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -conf hadoop-site.xml -ls /data
+ list /data directory in dfs with conf specified in hadoop-site.xml
+
+ $ bin/hadoop job -D mapred.job.tracker=darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt local -submit job.xml
+ submit a job to local runner
+
+ $ bin/hadoop jar -libjars testlib.jar
+ -archives test.tgz -files file.txt inputjar args
+ job submission with libjars, files and archives
+ </pre></blockquote></p>
+
+ @see Tool
+ @see ToolRunner]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericOptionsParser -->
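+ <!-- Illustrative sketch (editor's addition): separating the generic
+      Hadoop options listed above from application-specific arguments.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.util.GenericOptionsParser;
+
+      Configuration conf = new Configuration();
+      // Applies -conf/-D/-fs/-jt/-files/-libjars/-archives to conf;
+      // whatever is left over belongs to the application.
+      GenericOptionsParser parser = new GenericOptionsParser(conf, args);
+      String[] appArgs = parser.getRemainingArgs();
+ -->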
+ <!-- start class org.apache.hadoop.util.GenericsUtil -->
+ <class name="GenericsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericsUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClass" return="java.lang.Class&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="T"/>
+ <doc>
+ <![CDATA[Returns the Class object (of type <code>Class&lt;T&gt;</code>) of the
+ argument of type <code>T</code>.
+ @param <T> The type of the argument
+ @param t the object whose class is to be obtained
+ @return <code>Class&lt;T&gt;</code>]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class&lt;T&gt;"/>
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+ <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param c the Class object of the items in the list
+ @param list the list to convert]]>
+ </doc>
+ </method>
+ <method name="toArray" return="T[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="list" type="java.util.List&lt;T&gt;"/>
+ <doc>
+ <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param list the list to convert
+ @throws ArrayIndexOutOfBoundsException if the list is empty.
+ Use {@link #toArray(Class, List)} if the list may be empty.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Contains utility methods for dealing with Java Generics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericsUtil -->
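+ <!-- Illustrative sketch (editor's addition): the two toArray overloads.
+      The Class-taking form is the safe choice when the list may be empty.
+
+      import java.util.Arrays;
+      import java.util.List;
+      import org.apache.hadoop.util.GenericsUtil;
+
+      List<String> names = Arrays.asList("alpha", "beta");
+      String[] a = GenericsUtil.toArray(String.class, names);
+      // This overload infers the component type from the first element,
+      // so it throws ArrayIndexOutOfBoundsException on an empty list.
+      String[] b = GenericsUtil.toArray(names);
+ -->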
+ <!-- start class org.apache.hadoop.util.HeapSort -->
+ <class name="HeapSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="HeapSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using heap sort.
+ {@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of HeapSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.HeapSort -->
+ <!-- start class org.apache.hadoop.util.HostsFileReader -->
+ <class name="HostsFileReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HostsFileReader" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="refresh"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExcludedHosts" return="java.util.Set&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setIncludesFile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="includesFile" type="java.lang.String"/>
+ </method>
+ <method name="setExcludesFile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="excludesFile" type="java.lang.String"/>
+ </method>
+ <method name="updateFileNames"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="includesFile" type="java.lang.String"/>
+ <param name="excludesFile" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.util.HostsFileReader -->
+ <!-- start interface org.apache.hadoop.util.IndexedSortable -->
+ <interface name="IndexedSortable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Compare items at the given addresses consistent with the semantics of
+ {@link java.util.Comparable#compare}.]]>
+ </doc>
+ </method>
+ <method name="swap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Swap items at the given addresses.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for collections capable of being sorted by {@link IndexedSorter}
+ algorithms.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSortable -->
+ <!-- start interface org.apache.hadoop.util.IndexedSorter -->
+ <interface name="IndexedSorter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the items accessed through the given IndexedSortable over the given
+ range of logical indices. From the perspective of the sort algorithm,
+ each index between l (inclusive) and r (exclusive) is an addressable
+ entry.
+ @see IndexedSortable#compare
+ @see IndexedSortable#swap]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Same as {@link #sort(IndexedSortable,int,int)}, but indicate progress
+ periodically.
+ @see #sort(IndexedSortable,int,int)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for sort algorithms accepting {@link IndexedSortable} items.
+
+ A sort algorithm implementing this interface may only
+ {@link IndexedSortable#compare} and {@link IndexedSortable#swap} items
+ for a range of indices to effect a sort across that range.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSorter -->
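+ <!-- Illustrative sketch (editor's addition): an IndexedSortable over a
+      plain int array, sorted in place with the HeapSort class above.
+
+      import org.apache.hadoop.util.HeapSort;
+      import org.apache.hadoop.util.IndexedSortable;
+
+      class IntArraySortable implements IndexedSortable {
+        private final int[] a;
+        IntArraySortable(int[] a) { this.a = a; }
+        public int compare(int i, int j) {
+          return a[i] < a[j] ? -1 : (a[i] == a[j] ? 0 : 1);
+        }
+        public void swap(int i, int j) {
+          int t = a[i];  a[i] = a[j];  a[j] = t;
+        }
+      }
+
+      int[] data = { 5, 1, 4, 2 };
+      // Sorts indices 0 (inclusive) through data.length (exclusive).
+      new HeapSort().sort(new IntArraySortable(data), 0, data.length);
+ -->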
+ <!-- start class org.apache.hadoop.util.LineReader -->
+ <class name="LineReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LineReader" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ default buffer-size (64k).
+ @param in The input stream
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="LineReader" type="java.io.InputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ given buffer-size.
+ @param in The input stream
+ @param bufferSize Size of the read buffer
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ <code>io.file.buffer.size</code> specified in the given
+ <code>Configuration</code>.
+ @param in input stream
+ @param conf configuration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the underlying stream.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <param name="maxBytesToConsume" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @param maxLineLength the maximum number of bytes to store into str.
+ @param maxBytesToConsume the maximum number of bytes to consume in this call.
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @param maxLineLength the maximum number of bytes to store into str.
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides a line reader from an input stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.LineReader -->
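+ <!-- Illustrative sketch (editor's addition): reading a stream line by
+      line into a reusable Text. The file name is a placeholder, and
+      treating a return value of 0 as end-of-stream is an assumption of
+      this sketch (readLine is documented only as returning the bytes
+      consumed, including the newline).
+
+      import java.io.FileInputStream;
+      import java.io.IOException;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.util.LineReader;
+
+      void dumpLines() throws IOException {
+        LineReader reader = new LineReader(new FileInputStream("input.txt"));
+        Text line = new Text();
+        while (reader.readLine(line) > 0) {  // 0 assumed to mean EOF
+          System.out.println(line);
+        }
+        reader.close();
+      }
+ -->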
+ <!-- start class org.apache.hadoop.util.MergeSort -->
+ <class name="MergeSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MergeSort" type="java.util.Comparator&lt;org.apache.hadoop.io.IntWritable&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mergeSort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="int[]"/>
+ <param name="dest" type="int[]"/>
+ <param name="low" type="int"/>
+ <param name="high" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of MergeSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.MergeSort -->
+ <!-- start class org.apache.hadoop.util.NativeCodeLoader -->
+ <class name="NativeCodeLoader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeCodeLoader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeCodeLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if native-hadoop code is loaded for this platform.
+
+ @return <code>true</code> if native-hadoop is loaded,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getLoadNativeLibraries" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return whether native hadoop libraries, if present, can be used for this job.
+ @param conf configuration
+
+ @return <code>true</code> if native hadoop libraries, if present, can be
+ used for this job; <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setLoadNativeLibraries"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="loadNativeLibraries" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether native hadoop libraries, if present, can be used for this job.
+
+ @param conf configuration
+ @param loadNativeLibraries can native hadoop libraries be loaded]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A helper to load the native hadoop code, i.e. libhadoop.so.
+ This handles the fallback to either the bundled libhadoop-Linux-i386-32.so
+ or the default Java implementations where appropriate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.NativeCodeLoader -->
+ <!-- start class org.apache.hadoop.util.PlatformName -->
+ <class name="PlatformName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PlatformName"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPlatformName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete platform string as per the Java VM.
+ @return the complete platform string as per the Java VM.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[A helper class for getting build info of the Java VM.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PlatformName -->
+ <!-- start class org.apache.hadoop.util.PrintJarMainClass -->
+ <class name="PrintJarMainClass" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PrintJarMainClass"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A micro-application that prints the main class name out of a jar file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PrintJarMainClass -->
+ <!-- start class org.apache.hadoop.util.PriorityQueue -->
+ <class name="PriorityQueue" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PriorityQueue"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="lessThan" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Determines the ordering of objects in this priority queue. Subclasses
+ must define this one method.]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="maxSize" type="int"/>
+ <doc>
+ <![CDATA[Subclass constructors must call this.]]>
+ </doc>
+ </method>
+ <method name="put"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="T"/>
+ <doc>
+ <![CDATA[Adds an Object to a PriorityQueue in log(size) time.
+ If one tries to add more objects than the maxSize given to initialize,
+ a RuntimeException (ArrayIndexOutOfBoundsException) is thrown.]]>
+ </doc>
+ </method>
+ <method name="insert" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="T"/>
+ <doc>
+ <![CDATA[Adds element to the PriorityQueue in log(size) time if either
+ the PriorityQueue is not full, or not lessThan(element, top()).
+ @param element
+ @return true if element is added, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="top" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the least element of the PriorityQueue in constant time.]]>
+ </doc>
+ </method>
+ <method name="pop" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes and returns the least element of the PriorityQueue in log(size)
+ time.]]>
+ </doc>
+ </method>
+ <method name="adjustTop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should be called when the Object at top changes values. Still log(n)
+ worst case, but it's at least twice as fast to <pre>
+ { pq.top().change(); pq.adjustTop(); }
+ </pre> instead of <pre>
+ { o = pq.pop(); o.change(); pq.put(o); }
+ </pre>]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of elements currently stored in the PriorityQueue.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes all entries from the PriorityQueue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A PriorityQueue maintains a partial ordering of its elements such that the
+ least element can always be found in constant time. Put()'s and pop()'s
+ require log(size) time.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PriorityQueue -->
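+ <!-- Illustrative sketch (editor's addition): a minimal subclass. Only
+      lessThan must be defined, and initialize(maxSize) must be called
+      from the subclass constructor, per the docs above.
+
+      import org.apache.hadoop.util.PriorityQueue;
+
+      class LongQueue extends PriorityQueue<Long> {
+        LongQueue(int maxSize) { initialize(maxSize); }
+        protected boolean lessThan(Object a, Object b) {
+          return ((Long) a).longValue() < ((Long) b).longValue();
+        }
+      }
+
+      LongQueue q = new LongQueue(8);
+      q.put(3L);  q.put(1L);  q.put(2L);
+      Long least = q.top();   // 1L, found in constant time
+      Long popped = q.pop();  // removes and returns 1L in log(size) time
+ -->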
+ <!-- start class org.apache.hadoop.util.ProcfsBasedProcessTree -->
+ <class name="ProcfsBasedProcessTree" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ProcfsBasedProcessTree" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setSigKillInterval"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="interval" type="long"/>
+ </method>
+ <method name="isAvailable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Checks if the ProcfsBasedProcessTree is available on this system.
+
+ @return true if ProcfsBasedProcessTree is available, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getProcessTree" return="org.apache.hadoop.util.ProcfsBasedProcessTree"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the process-tree with latest state.
+
+ @return the process-tree with latest state.]]>
+ </doc>
+ </method>
+ <method name="isAlive" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the process-tree alive? Currently we care only about the status of the
+ root-process.
+
+ @return true if the process-tree is alive, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="destroy"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Destroy the process-tree. Currently we only make sure the root process is
+ gone. It is the responsibility of the root process to make sure that all
+ its descendants are cleaned up.]]>
+ </doc>
+ </method>
+ <method name="getCumulativeVmem" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the cumulative virtual memory used by all the processes in the
+ process-tree.
+
+ @return cumulative virtual memory used by the process-tree in kilobytes.]]>
+ </doc>
+ </method>
+ <method name="getPidFromPidFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pidFileName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get PID from a pid-file.
+
+ @param pidFileName
+ Name of the pid-file.
+ @return the PID string read from the pid-file, or null if
+ pidFileName points to a non-existent file or if reading
+ the file fails.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string printing the PIDs of the processes present in the
+ ProcfsBasedProcessTree. Output format: [pid pid ..]]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_SLEEPTIME_BEFORE_SIGKILL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Proc file-system based ProcessTree. Works only on Linux.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ProcfsBasedProcessTree -->
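+ <!-- Illustrative sketch (editor's addition): sampling the virtual memory
+      of a process tree on Linux. The pid string is a placeholder.
+
+      import org.apache.hadoop.util.ProcfsBasedProcessTree;
+
+      if (ProcfsBasedProcessTree.isAvailable()) {
+        ProcfsBasedProcessTree tree = new ProcfsBasedProcessTree("4231");
+        tree = tree.getProcessTree();           // refresh from /proc
+        long vmemKB = tree.getCumulativeVmem(); // whole tree, in kilobytes
+      }
+ -->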
+ <!-- start class org.apache.hadoop.util.ProgramDriver -->
+ <class name="ProgramDriver" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ProgramDriver"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="mainClass" type="java.lang.Class"/>
+ <param name="description" type="java.lang.String"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[This is the method that adds the class to the repository.
+ @param name the name under which the class instance will be invoked
+ @param mainClass the class to add to the repository
+ @param description the description of the class
+ @throws NoSuchMethodException
+ @throws SecurityException]]>
+ </doc>
+ </method>
+ <method name="driver"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[This is a driver for the example programs.
+ It looks at the first command line argument and tries to find an
+ example program with that name.
+ If it is found, it calls the main method in that class with the rest
+ of the command line arguments.
+ @param args The arguments from the user; args[0] is the command to run.
+ @throws NoSuchMethodException
+ @throws SecurityException
+ @throws IllegalAccessException
+ @throws IllegalArgumentException
+ @throws Throwable Anything thrown by the example program's main]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A driver that is used to run programs added to it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ProgramDriver -->
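+ <!-- Illustrative sketch (editor's addition): registering programs and
+      dispatching on args[0]. WordCount is a hypothetical class with a
+      main(String[]) method.
+
+      import org.apache.hadoop.util.ProgramDriver;
+
+      public class ExampleDriver {
+        public static void main(String[] args) throws Throwable {
+          ProgramDriver drv = new ProgramDriver();
+          drv.addClass("wordcount", WordCount.class,  // hypothetical class
+                       "counts the words in the input files");
+          // Looks up args[0] and invokes that class's main with the rest.
+          drv.driver(args);
+        }
+      }
+ -->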
+ <!-- start class org.apache.hadoop.util.Progress -->
+ <class name="Progress" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Progress"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new root node.]]>
+ </doc>
+ </constructor>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a named node to the tree.]]>
+ </doc>
+ </method>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds a node to the tree.]]>
+ </doc>
+ </method>
+ <method name="startNextPhase"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Called during execution to move to the next phase at this level in the
+ tree.]]>
+ </doc>
+ </method>
+ <method name="phase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current sub-node executing.]]>
+ </doc>
+ </method>
+ <method name="complete"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Completes this node, moving the parent node to its next child.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progress" type="float"/>
+ <doc>
+ <![CDATA[Called during execution on a leaf node to set its progress.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the overall progress of the root.]]>
+ </doc>
+ </method>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Utility to assist with generation of progress reports. Applications build
+ a hierarchy of {@link Progress} instances, each modelling a phase of
+ execution. The root is constructed with {@link #Progress()}. Nodes for
+ sub-phases are created by calling {@link #addPhase()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Progress -->
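+ <!-- Illustrative sketch (editor's addition): a two-phase progress tree.
+      The aggregate value in the comment assumes equally weighted phases.
+
+      import org.apache.hadoop.util.Progress;
+
+      Progress root = new Progress();
+      Progress copy = root.addPhase("copy");
+      Progress sort = root.addPhase("sort");
+      copy.set(0.5f);              // halfway through the first leaf phase
+      float overall = root.get();  // 0.25f assuming two equal phases
+      root.startNextPhase();       // execution moves on to "sort"
+ -->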
+ <!-- start interface org.apache.hadoop.util.Progressable -->
+ <interface name="Progressable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Report progress to the Hadoop framework.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for reporting progress.
+
+ <p>Clients and/or applications can use the provided <code>Progressable</code>
+ to explicitly report progress to the Hadoop framework. This is especially
+ important for operations which take a significant amount of time since,
+ in lieu of the reported progress, the framework has to assume that an error
+ has occurred and time out the operation.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Progressable -->
+ <!-- start class org.apache.hadoop.util.QuickSort -->
+ <class name="QuickSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="QuickSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMaxDepth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="x" type="int"/>
+ <doc>
+ <![CDATA[Deepest recursion before giving up and doing a heapsort.
+ Returns 2 * ceil(log(n)).]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using quick sort.
+ {@inheritDoc} If the recursion depth falls below {@link #getMaxDepth},
+ then switch to {@link HeapSort}.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of QuickSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.QuickSort -->
+ <!-- start class org.apache.hadoop.util.ReflectionUtils -->
+ <class name="ReflectionUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReflectionUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theObject" type="java.lang.Object"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check and set 'configuration' if necessary.
+
+ @param theObject object for which to set configuration
+ @param conf Configuration]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="T"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class&lt;T&gt;"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create an object for the given class and initialize it from conf
+
+ @param theClass class of which an object is created
+ @param conf Configuration
+ @return a new object]]>
+ </doc>
+ </method>
+ <method name="setContentionTracing"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="boolean"/>
+ </method>
+ <method name="printThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.PrintWriter"/>
+ <param name="title" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Print all of the threads' information and stack traces.
+
+ @param stream the stream to write to
+ @param title a string title for the stack trace]]>
+ </doc>
+ </method>
+ <method name="logThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="title" type="java.lang.String"/>
+ <param name="minInterval" type="long"/>
+ <doc>
+ <![CDATA[Log the current thread stacks at INFO level.
+ @param log the logger that logs the stack trace
+ @param title a descriptive title for the call stacks
+ @param minInterval the minimum interval since the last dump before
+ the stacks are logged again]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class&lt;T&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="T"/>
+ <doc>
+ <![CDATA[Return the correctly-typed {@link Class} of the given object.
+
+ @param o object whose correctly-typed <code>Class</code> is to be obtained
+ @return the correctly typed <code>Class</code> of the given object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[General reflection utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ReflectionUtils -->
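+ <!-- Usage sketch (illustrative; not part of the JDiff-generated API data).
+ Creating a configured instance reflectively; if the class implements
+ Configurable, setConf(conf) is applied to the new object. The key name
+ "my.impl.class" is hypothetical:
+
+ Configuration conf = new Configuration();
+ Class<?> cls = conf.getClass("my.impl.class", Object.class);
+ Object impl = ReflectionUtils.newInstance(cls, conf);
+ -->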
+ <!-- start class org.apache.hadoop.util.RunJar -->
+ <class name="RunJar" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RunJar"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="unJar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jarFile" type="java.io.File"/>
+ <param name="toDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unpack a jar file into a directory.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Run a Hadoop job jar. If the main class is not in the jar's manifest,
+ then it must be provided on the command line.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Run a Hadoop job jar.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.RunJar -->
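+ <!-- Usage sketch (illustrative; not part of the JDiff-generated API data).
+ Unpacking a job jar into a scratch directory; both paths are hypothetical
+ and exception handling is omitted:
+
+ RunJar.unJar(new File("/tmp/myjob.jar"), new File("/tmp/myjob.unpacked"));
+ -->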
+ <!-- start class org.apache.hadoop.util.ServletUtil -->
+ <class name="ServletUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ServletUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initHTML" return="java.io.PrintWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="response" type="javax.servlet.ServletResponse"/>
+ <param name="title" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initial HTML header]]>
+ </doc>
+ </method>
+ <method name="getParameter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.ServletRequest"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a parameter from a ServletRequest.
+ Return null if the parameter contains only white spaces.]]>
+ </doc>
+ </method>
+ <method name="htmlFooter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[HTML footer to be added in the jsps.
+ @return the HTML footer.]]>
+ </doc>
+ </method>
+ <method name="percentageGraph" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="perc" type="int"/>
+ <param name="width" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate the percentage graph and return its HTML
+ string representation.
+
+ @param perc The percentage value for which graph is to be generated
+ @param width The width of the display table
+ @return HTML String representation of the percentage graph
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="percentageGraph" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="perc" type="float"/>
+ <param name="width" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate the percentage graph and return its HTML
+ string representation.
+ @param perc The percentage value for which graph is to be generated
+ @param width The width of the display table
+ @return HTML String representation of the percentage graph
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="HTML_TAIL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.util.ServletUtil -->
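+ <!-- Usage sketch (illustrative; not part of the JDiff-generated API data).
+ Writing a status page from a servlet's doGet; the title and values are
+ hypothetical and exception handling is omitted:
+
+ PrintWriter out = ServletUtil.initHTML(response, "Cluster Status");
+ out.print(ServletUtil.percentageGraph(42, 200)); // 42% bar in a 200-wide table
+ out.print(ServletUtil.HTML_TAIL);
+ -->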
+ <!-- start class org.apache.hadoop.util.Shell -->
+ <class name="Shell" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param interval the minimum duration to wait before re-executing the
+ command.]]>
+ </doc>
+ </constructor>
+ <method name="getGROUPS_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's groups list]]>
+ </doc>
+ </method>
+ <method name="getGET_PERMISSION_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a Unix command to get permission information.]]>
+ </doc>
+ </method>
+ <method name="getUlimitMemoryCommand" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the Unix command for setting the maximum virtual memory available
+ to a given child process. This is only relevant when we are forking a
+ process from within the {@link org.apache.hadoop.mapred.Mapper} or the
+ {@link org.apache.hadoop.mapred.Reducer} implementations
+ e.g. <a href="{@docRoot}/org/apache/hadoop/mapred/pipes/package-summary.html">Hadoop Pipes</a>
+ or <a href="{@docRoot}/org/apache/hadoop/streaming/package-summary.html">Hadoop Streaming</a>.
+
+ It also checks that we are running on a *nix platform; otherwise
+ (e.g. on Cygwin/Windows) it returns <code>null</code>.
+ @param conf configuration
+ @return a <code>String[]</code> with the ulimit command arguments or
+ <code>null</code> if we are running on a non *nix platform or
+ if the limit is unspecified.]]>
+ </doc>
+ </method>
+ <method name="setEnvironment"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="env" type="java.util.Map&lt;java.lang.String, java.lang.String&gt;"/>
+ <doc>
+ <![CDATA[Set the environment for the command.
+ @param env mapping of environment variables]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Set the working directory.
+ @param dir the directory where the command will be executed]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check whether the command needs to be executed, and execute it if needed.]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an array containing the command name and its parameters.]]>
+ </doc>
+ </method>
+ <method name="parseExecResult"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the execution result]]>
+ </doc>
+ </method>
+ <method name="getProcess" return="java.lang.Process"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current sub-process executing the given command.
+ @return the process executing the command]]>
+ </doc>
+ </method>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the exit code.
+ @return the exit code of the process]]>
+ </doc>
+ </method>
+ <method name="execCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Static method to execute a shell command.
+ Covers most of the simple cases without requiring the user to
+ subclass <code>Shell</code>.
+ @param cmd shell command to execute.
+ @return the output of the executed command.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USER_NAME_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's name]]>
+ </doc>
+ </field>
+ <field name="SET_PERMISSION_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set permission]]>
+ </doc>
+ </field>
+ <field name="SET_OWNER_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set owner]]>
+ </doc>
+ </field>
+ <field name="SET_GROUP_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WINDOWS" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set to true on Windows platforms]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A base class for running a Unix command.
+
+ <code>Shell</code> can be used to run Unix commands like <code>du</code> or
+ <code>df</code>. It also offers facilities to gate command execution by
+ time interval.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell -->
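+ <!-- Usage sketch (illustrative; not part of the JDiff-generated API data).
+ One-shot command execution on a *nix platform via the static helper;
+ exception handling omitted:
+
+ String uname = Shell.execCommand(new String[]{"uname", "-a"});
+ System.out.println(uname);
+ -->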
+ <!-- start class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <class name="Shell.ExitCodeException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ExitCodeException" type="int, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is an IOException with exit code added.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <!-- start class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
+ <class name="Shell.ShellCommandExecutor" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File, java.util.Map&lt;java.lang.String, java.lang.String&gt;"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the shell command.]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getOutput" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the output of the shell command.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the command of this instance.
+ Arguments containing spaces are surrounded with quotes; other
+ arguments are presented raw.
+
+ @return a string representation of the object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple shell command executor.
+
+ <code>ShellCommandExecutor</code> should be used in cases where the output
+ of the command needs no explicit parsing and where the command, working
+ directory and environment remain unchanged. The output of the command
+ is stored as-is and is expected to be small.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
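+ <!-- Usage sketch (illustrative; not part of the JDiff-generated API data).
+ Running "df -k" and collecting its output; execute() throws
+ Shell.ExitCodeException when the command exits non-zero:
+
+ Shell.ShellCommandExecutor exec =
+     new Shell.ShellCommandExecutor(new String[]{"df", "-k"});
+ exec.execute();
+ String output = exec.getOutput();
+ int exitCode = exec.getExitCode();
+ -->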
+ <!-- start class org.apache.hadoop.util.StringUtils -->
+ <class name="StringUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StringUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stringifyException" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Make a string representation of the exception.
+ @param e The exception to stringify
+ @return A string with exception name and call stack.]]>
+ </doc>
+ </method>
+ <method name="simpleHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fullHostname" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a full hostname, return the word up to the first dot.
+ @param fullHostname the full hostname
+ @return the hostname to the first dot]]>
+ </doc>
+ </method>
+ <method name="humanReadableInt" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="number" type="long"/>
+ <doc>
+ <![CDATA[Given an integer, return a string that is in an approximate, but
+ human-readable, format.
+ It uses the suffixes 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3.
+ @param number the number to format
+ @return a human readable form of the integer]]>
+ </doc>
+ </method>
+ <method name="formatPercent" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="done" type="double"/>
+ <param name="digits" type="int"/>
+ <doc>
+ <![CDATA[Format a percentage for presentation to the user.
+ @param done the percentage to format (0.0 to 1.0)
+ @param digits the number of digits past the decimal point
+ @return a string representation of the percentage]]>
+ </doc>
+ </method>
+ <method name="arrayToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strs" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Given an array of strings, return a comma-separated list of its elements.
+ @param strs Array of strings
+ @return Empty string if strs.length is 0, comma separated list of strings
+ otherwise]]>
+ </doc>
+ </method>
+ <method name="byteToHexString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <doc>
+ <![CDATA[Convert a range of a byte array to its hex string
+ representation.
+ @param bytes the byte array
+ @param start start index, inclusive
+ @param end end index, exclusive
+ @return hex string representation of the byte array]]>
+ </doc>
+ </method>
+ <method name="byteToHexString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Same as byteToHexString(bytes, 0, bytes.length).]]>
+ </doc>
+ </method>
+ <method name="hexStringToByte" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a hex string, return the corresponding byte array.
+ @param hex the hex string
+ @return the byte array decoded from the hex string; its length is
+ therefore hex.length()/2]]>
+ </doc>
+ </method>
+ <method name="uriToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uris" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[Convert an array of URIs to a comma-separated string.
+ @param uris the URIs to stringify]]>
+ </doc>
+ </method>
+ <method name="stringToURI" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convert an array of strings to an array of URIs.
+ @param str the strings to convert]]>
+ </doc>
+ </method>
+ <method name="stringToPath" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convert an array of strings to an array of Paths.
+ @param str the strings to convert]]>
+ </doc>
+ </method>
+ <method name="formatTimeDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+ <![CDATA[Given a finish and start time in long milliseconds, returns a
+ String in the format Xhrs, Ymins, Z sec for the time difference between the two times.
+ If the finish time comes before the start time, negative values of X, Y and Z will be returned.
+
+ @param finishTime finish time
+ @param startTime start time]]>
+ </doc>
+ </method>
+ <method name="formatTime" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="timeDiff" type="long"/>
+ <doc>
+ <![CDATA[Given the time in long milliseconds, returns a
+ String in the format Xhrs, Ymins, Z sec.
+
+ @param timeDiff The time difference to format]]>
+ </doc>
+ </method>
+ <method name="getFormattedTimeWithDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dateFormat" type="java.text.DateFormat"/>
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+ <![CDATA[Formats time in ms and appends difference (finishTime - startTime)
+ as returned by formatTimeDiff().
+ If the finish time is 0, an empty string is returned; if the start time is 0,
+ the difference is not appended to the return value.
+ @param dateFormat date format to use
+ @param finishTime finish time
+ @param startTime start time
+ @return formatted value.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an array of strings.
+ @param str the comma separated string values
+ @return the array of the comma separated string values]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection&lt;java.lang.String&gt;"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a collection of strings.
+ @param str comma separated string values
+ @return an <code>ArrayList</code> of string values]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Split a string using the default separator.
+ @param str a string that may have escaped separators
+ @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="separator" type="char"/>
+ <doc>
+ <![CDATA[Split a string using the given separator.
+ @param str a string that may have escaped separators
+ @param escapeChar a char that can be used to escape the separator
+ @param separator a separator char
+ @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="findNext" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="separator" type="char"/>
+ <param name="escapeChar" type="char"/>
+ <param name="start" type="int"/>
+ <param name="split" type="java.lang.StringBuilder"/>
+ <doc>
+ <![CDATA[Finds the first occurrence of the separator character, ignoring escaped
+ separators, starting from the given index. The substring between the index
+ and the position of the separator is passed back via <code>split</code>.
+ @param str the source string
+ @param separator the character to find
+ @param escapeChar character used to escape
+ @param start from where to search
+ @param split used to pass back the extracted string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Escape commas in the string using the default escape char
+ @param str a string
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Escape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the char to be escaped
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charsToEscape" type="char[]"/>
+ <doc>
+ <![CDATA[Escape the given characters in the string with the escape char.
+ @param charsToEscape array of characters to be escaped]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Unescape commas in the string using the default escape char
+ @param str a string
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Unescape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the escaped char
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charsToEscape" type="char[]"/>
+ <doc>
+ <![CDATA[Unescape the given characters in the string.
+ @param charsToEscape array of characters to unescape]]>
+ </doc>
+ </method>
+ <method name="getHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return hostname without throwing exception.
+ @return hostname]]>
+ </doc>
+ </method>
+ <method name="startupShutdownMessage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class&lt;?&gt;"/>
+ <param name="args" type="java.lang.String[]"/>
+ <param name="LOG" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Print a log message for starting up and shutting down
+ @param clazz the class of the server
+ @param args arguments
+ @param LOG the target log object]]>
+ </doc>
+ </method>
+ <method name="escapeHTML" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Escapes the HTML special characters present in the string.
+ @param string the string to escape
+ @return the HTML-escaped string representation]]>
+ </doc>
+ </method>
+ <field name="COMMA" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ESCAPE_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[General string utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.StringUtils -->
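+ <!-- Usage sketch (illustrative; not part of the JDiff-generated API data).
+ Escaping and splitting with the default comma separator; an escaped comma
+ is not treated as a separator:
+
+ String escaped  = StringUtils.escapeString("x,y");     // escape the comma
+ String restored = StringUtils.unEscapeString(escaped); // back to "x,y"
+ String[] parts  = StringUtils.split("a,b\\,c");        // 2 tokens, not 3
+ -->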
+ <!-- start class org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix -->
+ <class name="StringUtils.TraditionalBinaryPrefix" extends="java.lang.Enum&lt;org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix&gt;"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="symbol" type="char"/>
+ <doc>
+ <![CDATA[@return The TraditionalBinaryPrefix object corresponding to the symbol.]]>
+ </doc>
+ </method>
+ <method name="string2long" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convert a string to long.
+ The input string is first trimmed
+ and then parsed with a traditional binary prefix.
+
+ For example,
+ "-1230k" will be converted to -1230 * 1024 = -1259520;
+ "891g" will be converted to 891 * 1024^3 = 956703965184;
+
+ @param s input string
+ @return a long value represented by the input string.]]>
+ </doc>
+ </method>
+ <field name="value" type="long"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="symbol" type="char"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The traditional binary prefixes, kilo, mega, ..., exa,
+ which can be represented by a 64-bit integer.
+ TraditionalBinaryPrefix symbols are case-insensitive.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix -->
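+ <!-- Usage sketch (illustrative; not part of the JDiff-generated API data).
+ Parsing values with traditional binary prefixes, using the examples from
+ the documentation above:
+
+ long k = StringUtils.TraditionalBinaryPrefix.string2long("-1230k");
+ // k == -1230 * 1024 == -1259520
+ long g = StringUtils.TraditionalBinaryPrefix.string2long("891g");
+ // g == 891 * 1024^3 == 956703965184
+ -->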
+ <!-- start interface org.apache.hadoop.util.Tool -->
+ <interface name="Tool" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Execute the command with the given arguments.
+
+ @param args command specific arguments.
+ @return exit code.
+ @throws Exception]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A tool interface that supports handling of generic command-line options.
+
+ <p><code>Tool</code> is the standard interface for any Map-Reduce tool or application.
+ The tool/application should delegate the handling of
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ standard command-line options</a> to {@link ToolRunner#run(Tool, String[])}
+ and only handle its custom arguments.</p>
+
+ <p>Here is how a typical <code>Tool</code> is implemented:</p>
+ <p><blockquote><pre>
+ public class MyApp extends Configured implements Tool {
+
+ public int run(String[] args) throws Exception {
+ // <code>Configuration</code> processed by <code>ToolRunner</code>
+ Configuration conf = getConf();
+
+ // Create a JobConf using the processed <code>conf</code>
+ JobConf job = new JobConf(conf, MyApp.class);
+
+ // Process custom command-line options
+ Path in = new Path(args[1]);
+ Path out = new Path(args[2]);
+
+ // Specify various job-specific parameters
+ job.setJobName("my-app");
+ job.setInputPath(in);
+ job.setOutputPath(out);
+ job.setMapperClass(MyApp.MyMapper.class);
+ job.setReducerClass(MyApp.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ return 0;
+ }
+
+ public static void main(String[] args) throws Exception {
+ // Let <code>ToolRunner</code> handle generic command-line options
+ int res = ToolRunner.run(new Configuration(), new MyApp(), args);
+
+ System.exit(res);
+ }
+ }
+ </pre></blockquote></p>
+
+ @see GenericOptionsParser
+ @see ToolRunner]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Tool -->
+ <!-- start class org.apache.hadoop.util.ToolRunner -->
+ <class name="ToolRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ToolRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the given <code>Tool</code> by {@link Tool#run(String[])}, after
+ parsing with the given generic arguments. Uses the given
+ <code>Configuration</code>, or builds one if null.
+
+ Sets the <code>Tool</code>'s configuration with the possibly modified
+ version of the <code>conf</code>.
+
+ @param conf <code>Configuration</code> for the <code>Tool</code>.
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the <code>Tool</code> with its <code>Configuration</code>.
+
+ Equivalent to <code>run(tool.getConf(), tool, args)</code>.
+
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Prints generic command-line arguments and usage information.
+
+ @param out stream to write usage information to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility to help run {@link Tool}s.
+
+ <p><code>ToolRunner</code> can be used to run classes implementing
+ <code>Tool</code> interface. It works in conjunction with
+ {@link GenericOptionsParser} to parse the
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ generic Hadoop command-line arguments</a> and modifies the
+ <code>Configuration</code> of the <code>Tool</code>. The
+ application-specific options are passed along without being modified.
+ </p>
+
+ @see Tool
+ @see GenericOptionsParser]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ToolRunner -->
+ <!-- start class org.apache.hadoop.util.UTF8ByteArrayUtils -->
+ <class name="UTF8ByteArrayUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UTF8ByteArrayUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="findByte" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <param name="b" type="byte"/>
+ <doc>
+ <![CDATA[Find the first occurrence of the given byte b in a UTF-8 encoded string
+ @param utf a byte array containing a UTF-8 encoded string
+ @param start starting offset
+ @param end ending position
+ @param b the byte to find
+ @return position of the first occurrence of the byte, otherwise -1]]>
+ </doc>
+ </method>
+ <method name="findBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <param name="b" type="byte[]"/>
+ <doc>
+ <![CDATA[Find the first occurrence of the given bytes b in a UTF-8 encoded string
+ @param utf a byte array containing a UTF-8 encoded string
+ @param start starting offset
+ @param end ending position
+ @param b the bytes to find
+ @return position of the first occurrence of the bytes, otherwise -1]]>
+ </doc>
+ </method>
+ <method name="findNthByte" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="b" type="byte"/>
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Find the nth occurrence of the given byte b in a UTF-8 encoded string
+ @param utf a byte array containing a UTF-8 encoded string
+ @param start starting offset
+ @param length the length of byte array
+ @param b the byte to find
+ @param n the desired occurrence of the given byte
+ @return position of the nth occurrence of the given byte if it exists; otherwise -1]]>
+ </doc>
+ </method>
+ <method name="findNthByte" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="b" type="byte"/>
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Find the nth occurrence of the given byte b in a UTF-8 encoded string
+ @param utf a byte array containing a UTF-8 encoded string
+ @param b the byte to find
+ @param n the desired occurrence of the given byte
+ @return position of the nth occurrence of the given byte if it exists; otherwise -1]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.util.UTF8ByteArrayUtils -->
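+ <!-- Usage sketch (illustrative; not part of the JDiff-generated API data).
+ Locating a tab separator in a UTF-8 encoded key/value record; exception
+ handling omitted:
+
+ byte[] utf = "key\tvalue".getBytes("UTF-8");
+ int tab = UTF8ByteArrayUtils.findByte(utf, 0, utf.length, (byte) '\t');
+ // tab == 3; the find methods return -1 when the byte does not occur
+ -->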
+ <!-- start class org.apache.hadoop.util.VersionInfo -->
+ <class name="VersionInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Hadoop version.
+ @return the Hadoop version string, e.g. "0.6.3-dev"]]>
+ </doc>
+ </method>
+ <method name="getRevision" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Subversion revision number for the root directory.
+ @return the revision number, e.g. "451451"]]>
+ </doc>
+ </method>
+ <method name="getDate" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The date that Hadoop was compiled.
+ @return the compilation date in unix date format]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The user that compiled Hadoop.
+ @return the username of the user]]>
+ </doc>
+ </method>
+ <method name="getUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Subversion URL for the root Hadoop directory.]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the buildVersion which includes version,
+ revision, user and date.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[This class finds the package info for Hadoop and the HadoopVersionAnnotation
+ information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.VersionInfo -->
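+ <!-- Usage sketch (illustrative; not part of the JDiff-generated API data).
+ Printing the build information exposed by VersionInfo:
+
+ System.out.println("Hadoop " + VersionInfo.getVersion()
+     + " (r" + VersionInfo.getRevision() + ")"
+     + " compiled by " + VersionInfo.getUser()
+     + " on " + VersionInfo.getDate());
+ -->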
+ <!-- start class org.apache.hadoop.util.XMLUtils -->
+ <class name="XMLUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="XMLUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="transform"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="styleSheet" type="java.io.InputStream"/>
+ <param name="xml" type="java.io.InputStream"/>
+ <param name="out" type="java.io.Writer"/>
+ <exception name="TransformerConfigurationException" type="javax.xml.transform.TransformerConfigurationException"/>
+ <exception name="TransformerException" type="javax.xml.transform.TransformerException"/>
+ <doc>
+ <![CDATA[Transform input xml given a stylesheet.
+
+ @param styleSheet the style-sheet
+ @param xml input xml data
+ @param out output
+ @throws TransformerConfigurationException
+ @throws TransformerException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[General xml utilities.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.XMLUtils -->
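+ <!-- Usage sketch (illustrative; not part of the JDiff-generated API data).
+ Applying an XSLT stylesheet to an XML document; file names are
+ hypothetical and exception handling is omitted:
+
+ InputStream styleSheet = new FileInputStream("report.xsl");
+ InputStream xml = new FileInputStream("data.xml");
+ Writer out = new OutputStreamWriter(System.out);
+ XMLUtils.transform(styleSheet, xml, out);
+ out.flush();
+ -->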
+</package>
+
+</api>
diff --git a/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.20.0.xml b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.20.0.xml
new file mode 100644
index 0000000000..9067cf1158
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jdiff/hadoop_0.20.0.xml
@@ -0,0 +1,52140 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Thu Apr 09 05:21:56 UTC 2009 -->
+
+<api
+ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+ xsi:noNamespaceSchemaLocation='api.xsd'
+ name="hadoop 0.20.0"
+ jdversion="1.0.9">
+
+<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/ndaley/hadoop/branch-0.20/build/ivy/lib/Hadoop/jdiff/jdiff-1.0.9.jar:/home/ndaley/hadoop/branch-0.20/build/ivy/lib/Hadoop/jdiff/xerces-1.4.4.jar -classpath /home/ndaley/hadoop/branch-0.20/build/classes:/home/ndaley/hadoop/branch-0.20/lib/commons-cli-2.0-SNAPSHOT.jar:/home/ndaley/hadoop/branch-0.20/lib/hsqldb-1.8.0.10.jar:/home/ndaley/hadoop/branch-0.20/lib/jsp-2.1/jsp-2.1.jar:/home/ndaley/hadoop/branch-0.20/lib/jsp-2.1/jsp-api-2.1.jar:/home/ndaley/hadoop/branch-0.20/lib/kfs-0.2.2.jar:/home/ndaley/hadoop/branch-0.20/conf:/home/ndaley/.ivy2/cache/commons-logging/commons-logging/jars/commons-logging-1.0.4.jar:/home/ndaley/.ivy2/cache/log4j/log4j/jars/log4j-1.2.15.jar:/home/ndaley/.ivy2/cache/commons-httpclient/commons-httpclient/jars/commons-httpclient-3.0.1.jar:/home/ndaley/.ivy2/cache/commons-codec/commons-codec/jars/commons-codec-1.3.jar:/home/ndaley/.ivy2/cache/xmlenc/xmlenc/jars/xmlenc-0.52.jar:/home/ndaley/.ivy2/cache/net.java.dev.jets3t/jets3t/jars/jets3t-0.6.1.jar:/home/ndaley/.ivy2/cache/commons-net/commons-net/jars/commons-net-1.4.1.jar:/home/ndaley/.ivy2/cache/org.mortbay.jetty/servlet-api-2.5/jars/servlet-api-2.5-6.1.14.jar:/home/ndaley/.ivy2/cache/oro/oro/jars/oro-2.0.8.jar:/home/ndaley/.ivy2/cache/org.mortbay.jetty/jetty/jars/jetty-6.1.14.jar:/home/ndaley/.ivy2/cache/org.mortbay.jetty/jetty-util/jars/jetty-util-6.1.14.jar:/home/ndaley/.ivy2/cache/tomcat/jasper-runtime/jars/jasper-runtime-5.5.12.jar:/home/ndaley/.ivy2/cache/tomcat/jasper-compiler/jars/jasper-compiler-5.5.12.jar:/home/ndaley/.ivy2/cache/commons-el/commons-el/jars/commons-el-1.0.jar:/home/ndaley/.ivy2/cache/junit/junit/jars/junit-3.8.1.jar:/home/ndaley/.ivy2/cache/commons-logging/commons-logging-api/jars/commons-logging-api-1.0.4.jar:/home/ndaley/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.4.3.jar:/home/ndaley/.ivy2/cache/org.eclipse.jdt/core/jars/core-3.1.1.jar:/home/ndaley/.ivy2/cache/org.slf4j/slf4j-log4j12/jars/slf4j-log4j12-1.4.3.jar:/home/ndaley/.ivy2/cache/jdiff/jdiff/jars/jdiff-1.0.9.jar:/home/ndaley/.ivy2/cache/xerces/xerces/jars/xerces-1.4.4.jar:/home/ndaley/tools/ant/latest/lib/ant-launcher.jar:/home/ndaley/tools/ant/latest/lib/ant-antlr.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-bcel.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-bsf.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-log4j.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-oro.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-regexp.jar:/home/ndaley/tools/ant/latest/lib/ant-apache-resolver.jar:/home/ndaley/tools/ant/latest/lib/ant-commons-logging.jar:/home/ndaley/tools/ant/latest/lib/ant-commons-net.jar:/home/ndaley/tools/ant/latest/lib/ant-jai.jar:/home/ndaley/tools/ant/latest/lib/ant-javamail.jar:/home/ndaley/tools/ant/latest/lib/ant-jdepend.jar:/home/ndaley/tools/ant/latest/lib/ant-jmf.jar:/home/ndaley/tools/ant/latest/lib/ant-jsch.jar:/home/ndaley/tools/ant/latest/lib/ant-junit.jar:/home/ndaley/tools/ant/latest/lib/ant-netrexx.jar:/home/ndaley/tools/ant/latest/lib/ant-nodeps.jar:/home/ndaley/tools/ant/latest/lib/ant-starteam.jar:/home/ndaley/tools/ant/latest/lib/ant-stylebook.jar:/home/ndaley/tools/ant/latest/lib/ant-swing.jar:/home/ndaley/tools/ant/latest/lib/ant-testutil.jar:/home/ndaley/tools/ant/latest/lib/ant-trax.jar:/home/ndaley/tools/ant/latest/lib/ant-weblogic.jar:/home/ndaley/tools/ant/latest/lib/ant.jar:/home/ndaley/tools/ant/latest/lib/xercesImpl.jar:/home/ndaley/tools/ant/latest/lib/xml-apis.jar:/home/hadoopqa/tools/java/jdk1.6.0_07-64bit/lib/to
ols.jar -sourcepath /home/ndaley/hadoop/branch-0.20/src/core:/home/ndaley/hadoop/branch-0.20/src/mapred:/home/ndaley/hadoop/branch-0.20/src/tools -apidir /home/ndaley/hadoop/branch-0.20/lib/jdiff -apiname hadoop 0.20.0 -->
+<package name="org.apache.hadoop">
+ <!-- start interface org.apache.hadoop.HadoopVersionAnnotation -->
+ <interface name="HadoopVersionAnnotation" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.annotation.Annotation"/>
+ <method name="version" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Hadoop version.
+ @return the version string, e.g. "0.6.3-dev"]]>
+ </doc>
+ </method>
+ <method name="user" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the username that compiled Hadoop.]]>
+ </doc>
+ </method>
+ <method name="date" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the date when Hadoop was compiled.
+ @return the date in unix 'date' format]]>
+ </doc>
+ </method>
+ <method name="url" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the URL for the Subversion repository.]]>
+ </doc>
+ </method>
+ <method name="revision" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the subversion revision.
+ @return the revision number as a string (e.g. "451451")]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A package attribute that captures the version of Hadoop that was compiled.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.HadoopVersionAnnotation -->
+</package>
+<package name="org.apache.hadoop.conf">
+ <!-- start interface org.apache.hadoop.conf.Configurable -->
+ <interface name="Configurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration to be used by this object.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the configuration used by this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Something that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.conf.Configurable -->
+ <!-- start class org.apache.hadoop.conf.Configuration -->
+ <class name="Configuration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable"/>
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration where the behavior of reading from the default
+ resources can be turned off.
+
+ If the parameter {@code loadDefaults} is false, the new instance
+ will not load resources from the default files.
+ @param loadDefaults specifies whether to load from the default files]]>
+ </doc>
+ </constructor>
+ <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new configuration with the same settings cloned from another.
+
+ @param other the configuration from which to clone settings.]]>
+ </doc>
+ </constructor>
+ <method name="addDefaultResource"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add a default resource. Resources are loaded in the order of the resources
+ added.
+ @param name file name. File should be present in the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param name resource to be added, the classpath is examined for a file
+ with that name.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="url" type="java.net.URL"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param url url of the resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param file file-path of resource to be added, the local filesystem is
+ examined directly to find the resource, without referring to
+ the classpath.]]>
+ </doc>
+ </method>
+ <method name="addResource"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <doc>
+ <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param in InputStream to deserialize the object from.]]>
+ </doc>
+ </method>
+ <method name="reloadConfiguration"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reload configuration from previously added resources.
+
+ This method will clear all the configuration read from the added
+ resources, as well as final parameters. The resources will then be
+ read again before the values are next accessed. Values that are added
+ via set methods will overlay values read from the resources.]]>
+ </doc>
+ </method>
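+ <!-- A short sketch of resource loading and reloading, assuming a classpath
+      resource named my-site.xml (an illustrative name):
+
+      import org.apache.hadoop.conf.Configuration;
+
+      public class ResourceDemo {
+        public static void main(String[] args) {
+          Configuration conf = new Configuration(); // loads the default resources
+          conf.addResource("my-site.xml");   // overrides earlier, non-final values
+          conf.set("my.key", "overlay");     // set() values survive a reload
+          conf.reloadConfiguration();        // drop resource values; re-read lazily
+          System.out.println(conf.get("my.key")); // still "overlay"
+        }
+      }
+ -->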
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, <code>null</code> if
+ no such property exists.
+
+ Values are processed for <a href="#VariableExpansion">variable expansion</a>
+ before being returned.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="getRaw" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property, without doing
+ <a href="#VariableExpansion">variable expansion</a>.
+
+ @param name the property name.
+ @return the value of the <code>name</code> property,
+ or null if no such property exists.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the <code>value</code> of the <code>name</code> property.
+
+ @param name property name.
+ @param value property value.]]>
+ </doc>
+ </method>
+ <method name="setIfUnset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets a property if it is currently unset.
+ @param name the property name
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="get" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property. If no such property
+ exists, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value, or <code>defaultValue</code> if the property
+ doesn't exist.]]>
+ </doc>
+ </method>
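+ <!-- A minimal sketch of the basic accessors; the property names are
+      illustrative, not Hadoop keys:
+
+      Configuration conf = new Configuration();
+      conf.set("myapp.dir", "/data");                  // plain string property
+      conf.setIfUnset("myapp.dir", "/ignored");        // no-op: already set
+      String dir      = conf.get("myapp.dir");         // "/data"
+      String missing  = conf.get("no.such.key");       // null
+      String fallback = conf.get("no.such.key", "x");  // "x"
+ -->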
+ <method name="getInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="int"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>.
+
+ If no such property exists, or if the specified value is not a valid
+ <code>int</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as an <code>int</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>.
+
+ @param name property name.
+ @param value <code>int</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="long"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>long</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>long</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>.
+
+ @param name property name.
+ @param value <code>long</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="getFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="float"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>float</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>float</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="float"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>float</code>.
+
+ @param name property name.
+ @param value property value.]]>
+ </doc>
+ </method>
+ <method name="getBoolean" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="boolean"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>boolean</code>, then <code>defaultValue</code> is returned.
+
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>boolean</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setBoolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>.
+
+ @param name property name.
+ @param value <code>boolean</code> value of the property.]]>
+ </doc>
+ </method>
+ <method name="setBooleanIfUnset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the given property, if it is currently unset.
+ @param name property name
+ @param value new value]]>
+ </doc>
+ </method>
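+ <!-- A sketch of the typed accessors, continuing the illustrative keys above:
+
+      conf.setInt("myapp.retries", 3);
+      conf.setBoolean("myapp.verbose", true);
+      int retries  = conf.getInt("myapp.retries", 1);         // 3
+      long limit   = conf.getLong("myapp.limit", 1024L);      // 1024: key unset
+      float ratio  = conf.getFloat("myapp.ratio", 0.5f);      // 0.5: key unset
+      boolean verb = conf.getBoolean("myapp.verbose", false); // true
+      conf.setBooleanIfUnset("myapp.verbose", false);         // no-op: already set
+ -->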
+ <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Parse the given attribute as a set of integer ranges
+ @param name the attribute name
+ @param defaultValue the default value if it is not set
+ @return a new set of ranges from the configured value]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ a collection of <code>String</code>s.
+ If no such property is specified then an empty collection is returned.
+ <p>
+ This is an optimized version of {@link #getStrings(String)}
+
+ @param name property name.
+ @return property value as a collection of <code>String</code>s.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then <code>null</code> is returned.
+
+ @param name property name.
+ @return property value as an array of <code>String</code>s,
+ or <code>null</code>.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Get the comma delimited values of the <code>name</code> property as
+ an array of <code>String</code>s.
+ If no such property is specified then the default value is returned.
+
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of <code>String</code>s,
+ or default value.]]>
+ </doc>
+ </method>
+ <method name="setStrings"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="values" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Set the array of string values for the <code>name</code> property
+ as comma delimited values.
+
+ @param name property name.
+ @param values The values]]>
+ </doc>
+ </method>
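+ <!-- A sketch of the string-array accessors; keys and values are illustrative.
+      Explicit arrays are used so the snippet does not depend on whether the
+      parameters are declared as varargs:
+
+      conf.setStrings("myapp.hosts", new String[] {"alpha", "beta"});
+      String[] hosts = conf.getStrings("myapp.hosts");  // {"alpha", "beta"}
+      java.util.Collection<String> c = conf.getStringCollection("myapp.hosts");
+      String[] def = conf.getStrings("no.such.key", new String[] {"fallback"});
+ -->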
+ <method name="getClassByName" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Load a class by name.
+
+ @param name the class name.
+ @return the class object.
+ @throws ClassNotFoundException if the class is not found.]]>
+ </doc>
+ </method>
+ <method name="getClasses" return="java.lang.Class[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class[]"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property
+ as an array of <code>Class</code>.
+ The value of the property specifies a list of comma separated class names.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the property name.
+ @param defaultValue default value.
+ @return property value as a <code>Class[]</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>.
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="defaultValue" type="java.lang.Class"/>
+ <param name="xface" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>
+ implementing the interface specified by <code>xface</code>.
+
+ If no such property is specified, then <code>defaultValue</code> is
+ returned.
+
+ An exception is thrown if the returned class does not implement the named
+ interface.
+
+ @param name the class name.
+ @param defaultValue default value.
+ @param xface the interface implemented by the named class.
+ @return property value as a <code>Class</code>,
+ or <code>defaultValue</code>.]]>
+ </doc>
+ </method>
+ <method name="setClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="theClass" type="java.lang.Class"/>
+ <param name="xface" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the value of the <code>name</code> property to the name of a
+ <code>theClass</code> implementing the given interface <code>xface</code>.
+
+ An exception is thrown if <code>theClass</code> does not implement the
+ interface <code>xface</code>.
+
+ @param name property name.
+ @param theClass property value.
+ @param xface the interface implemented by the named class.]]>
+ </doc>
+ </method>
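+ <!-- A sketch of storing a class-valued property; the Codec interface and
+      FastCodec class are hypothetical, defined here only for illustration:
+
+      import org.apache.hadoop.conf.Configuration;
+
+      public class ClassPropDemo {
+        public interface Codec {}
+        public static class FastCodec implements Codec {}
+
+        public static void main(String[] args) {
+          Configuration conf = new Configuration();
+          conf.setClass("myapp.codec", FastCodec.class, Codec.class);
+          // Throws if the stored class does not implement Codec.
+          Class<? extends Codec> c =
+              conf.getClass("myapp.codec", FastCodec.class, Codec.class);
+          System.out.println(c.getName()); // ClassPropDemo$FastCodec
+        }
+      }
+ -->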
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dirsProp" type="java.lang.String"/>
+ <param name="path" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with
+ the given <i>path</i>. If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code. If the selected
+ directory does not exist, an attempt is made to create it.
+
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+ </doc>
+ </method>
+ <method name="getResource" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the {@link URL} for the named resource.
+
+ @param name resource name.
+ @return the url for the named resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get an input stream attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return an input stream attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="getConfResourceAsReader" return="java.io.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a {@link Reader} attached to the configuration resource with the
+ given <code>name</code>.
+
+ @param name configuration resource name.
+ @return a reader attached to the resource.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of keys in the configuration.
+
+ @return number of keys in the configuration.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clears all keys from the configuration.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code>
+ key-value pairs in the configuration.
+
+ @return an iterator over the entries.]]>
+ </doc>
+ </method>
+ <method name="writeXml"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write out the non-default properties in this configuration to the given
+ {@link OutputStream}.
+
+ @param out the output stream to write to.]]>
+ </doc>
+ </method>
+ <method name="getClassLoader" return="java.lang.ClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link ClassLoader} for this job.
+
+ @return the correct class loader.]]>
+ </doc>
+ </method>
+ <method name="setClassLoader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="classLoader" type="java.lang.ClassLoader"/>
+ <doc>
+ <![CDATA[Set the class loader that will be used to load the various objects.
+
+ @param classLoader the new class loader.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setQuietMode"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="quietmode" type="boolean"/>
+ <doc>
+ <![CDATA[Set the quietness-mode.
+
+ In the quiet-mode, error and informational messages might not be logged.
+
+ @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
+ to turn it off.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[For debugging. List non-default properties to the terminal and exit.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
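+ <!-- A sketch of iterating and serializing a configuration; the single
+      property set here is illustrative:
+
+      import java.io.ByteArrayOutputStream;
+      import java.io.IOException;
+      import java.util.Map;
+      import org.apache.hadoop.conf.Configuration;
+
+      public class DumpDemo {
+        public static void main(String[] args) throws IOException {
+          Configuration conf = new Configuration();
+          conf.set("myapp.dir", "/data");
+
+          // Walk the expanded key/value pairs.
+          for (Map.Entry<String, String> e : conf) {
+            System.out.println(e.getKey() + " = " + e.getValue());
+          }
+
+          // Write the non-default properties as XML.
+          ByteArrayOutputStream out = new ByteArrayOutputStream();
+          conf.writeXml(out);
+          System.out.println(out.toString());
+        }
+      }
+ -->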
+ <doc>
+ <![CDATA[Provides access to configuration parameters.
+
+ <h4 id="Resources">Resources</h4>
+
+ <p>Configurations are specified by resources. A resource contains a set of
+ name/value pairs as XML data. Each resource is named by either a
+ <code>String</code> or by a {@link Path}. If named by a <code>String</code>,
+ then the classpath is examined for a file with that name. If named by a
+ <code>Path</code>, then the local filesystem is examined directly, without
+ referring to the classpath.
+
+ <p>Unless explicitly turned off, Hadoop by default specifies two
+ resources, loaded in-order from the classpath: <ol>
+ <li><tt><a href="{@docRoot}/../core-default.html">core-default.xml</a>
+ </tt>: Read-only defaults for hadoop.</li>
+ <li><tt>core-site.xml</tt>: Site-specific configuration for a given hadoop
+ installation.</li>
+ </ol>
+ Applications may add additional resources, which are loaded
+ subsequent to these resources in the order they are added.
+
+ <h4 id="FinalParams">Final Parameters</h4>
+
+ <p>Configuration parameters may be declared <i>final</i>.
+ Once a resource declares a value final, no subsequently-loaded
+ resource can alter that value.
+ For example, one might define a final parameter with:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;dfs.client.buffer.dir&lt;/name&gt;
+ &lt;value&gt;/tmp/hadoop/dfs/client&lt;/value&gt;
+ <b>&lt;final&gt;true&lt;/final&gt;</b>
+ &lt;/property&gt;</pre></tt>
+
+ Administrators typically define parameters as final in
+ <tt>core-site.xml</tt> for values that user applications may not alter.
+
+ <h4 id="VariableExpansion">Variable Expansion</h4>
+
+ <p>Value strings are first processed for <i>variable expansion</i>. The
+ available properties are:<ol>
+ <li>Other properties defined in this Configuration; and, if a name is
+ undefined here,</li>
+ <li>Properties in {@link System#getProperties()}.</li>
+ </ol>
+
+ <p>For example, if a configuration resource contains the following property
+ definitions:
+ <tt><pre>
+ &lt;property&gt;
+ &lt;name&gt;basedir&lt;/name&gt;
+ &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
+ &lt;/property&gt;
+
+ &lt;property&gt;
+ &lt;name&gt;tempdir&lt;/name&gt;
+ &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt;
+ &lt;/property&gt;</pre></tt>
+
+ When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
+ will be resolved to another property in this Configuration, while
+ <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
+ of the System property with that name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration -->
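+ <!-- A sketch of the variable expansion described above, set entirely in
+      code (the property values mirror the XML example):
+
+      Configuration conf = new Configuration();
+      conf.set("basedir", "/user/${user.name}");
+      conf.set("tempdir", "${basedir}/tmp");
+      // get() expands: basedir resolves within this Configuration, then
+      // user.name falls through to the JVM system properties.
+      System.out.println(conf.get("tempdir"));    // e.g. /user/alice/tmp
+      System.out.println(conf.getRaw("tempdir")); // ${basedir}/tmp, unexpanded
+ -->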
+ <!-- start class org.apache.hadoop.conf.Configuration.IntegerRanges -->
+ <class name="Configuration.IntegerRanges" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Configuration.IntegerRanges"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Configuration.IntegerRanges" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isIncluded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Is the given value in the set of ranges?
+ @param value the value to check
+ @return true if the value is in one of the ranges]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A class that represents a set of positive integer ranges. It parses
+ strings of the form "2-3,5,7-", where ranges are separated by commas and
+ the lower/upper bounds of a range are separated by a dash. Either the lower
+ or the upper bound may be omitted, meaning all values up to or over the
+ given bound. So the string above means 2, 3, 5, and 7, 8, 9, ...]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configuration.IntegerRanges -->
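+ <!-- A sketch of parsing a range-valued property via getRange(); the key and
+      range string are illustrative:
+
+      Configuration conf = new Configuration();
+      conf.set("myapp.ports", "2-3,5,7-");
+      Configuration.IntegerRanges r = conf.getRange("myapp.ports", "");
+      System.out.println(r.isIncluded(3)); // true
+      System.out.println(r.isIncluded(6)); // false
+      System.out.println(r.isIncluded(9)); // true: "7-" is open-ended
+ -->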
+ <!-- start class org.apache.hadoop.conf.Configured -->
+ <class name="Configured" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Configured"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <constructor name="Configured" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Configured.]]>
+ </doc>
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.conf.Configured -->
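+ <!-- A sketch of extending Configured instead of implementing Configurable by
+      hand; MyConfiguredTool and the key are illustrative:
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.conf.Configured;
+
+      public class MyConfiguredTool extends Configured {
+        public MyConfiguredTool(Configuration conf) { super(conf); }
+
+        public int retries() {
+          return getConf().getInt("myapp.retries", 1);
+        }
+      }
+ -->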
+</package>
+<package name="org.apache.hadoop.filecache">
+ <!-- start class org.apache.hadoop.filecache.DistributedCache -->
+ <class name="DistributedCache" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DistributedCache"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache directory where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred is
+ returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="honorSymLinkConf" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache directory where you want to localize the files/archives
+ @param fileStatus The file status on the dfs.
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred is
+ returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @param honorSymLinkConf if this is false, then the symlinks are not
+ created even if conf says so (this is required for an optimization in task
+ launches)
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCache" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="baseDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="isArchive" type="boolean"/>
+ <param name="confFileStamp" type="long"/>
+ <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the locally cached file or archive; it could either be
+ previously cached (and valid) or copy it from the {@link FileSystem} now.
+
+ @param cache the cache to be localized, this should be specified as
+ new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
+ or hostname:port is provided, the file is assumed to be in the filesystem
+ being used in the Configuration
+ @param conf The Configuration which contains the filesystem
+ @param baseDir The base cache directory where you want to localize the files/archives
+ @param isArchive if the cache is an archive or a file. In case it is an
+ archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
+ be unzipped/unjarred/untarred automatically
+ and the directory where the archive is unzipped/unjarred/untarred
+ is returned as the Path.
+ In case of a file, the path to the file is returned
+ @param confFileStamp this is the hdfs file modification timestamp to verify that the
+ file to be cached hasn't changed since the job started
+ @param currentWorkDir this is the directory where you would want to create symlinks
+ for the locally cached files/archives
+ @return the path to directory where the archives are unjarred in case of archives,
+ the path to the file where the file is copied locally
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="releaseCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is the opposite of getLocalCache. When you are done
+ using the cache, you need to release it
+ @param cache The cache URI to be released
+ @param conf configuration which contains the filesystem the cache
+ is contained in.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeRelative" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cache" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTimestamp" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="cache" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns mtime of a given cache file on hdfs.
+ @param conf configuration
+ @param cache cache file
+ @return mtime of a given cache file on hdfs
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createAllSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="jobCacheDir" type="java.io.File"/>
+ <param name="workDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method creates symlinks for all files in a given directory in another directory
+ @param conf the configuration
+ @param jobCacheDir the target directory for creating symlinks
+ @param workDir the directory in which the symlinks are created
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setCacheArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archives" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of archives
+ @param archives The list of archives that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="setCacheFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="java.net.URI[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Set the configuration with the given set of files
+ @param files The list of files that need to be localized
+ @param conf Configuration which will be changed]]>
+ </doc>
+ </method>
+ <method name="getCacheArchives" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache archives set in the Configuration
+ @param conf The configuration which contains the archives
+ @return A URI array of the caches set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCacheFiles" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get cache files set in the Configuration
+ @param conf The configuration which contains the files
+ @return A URI array of the files set in the Configuration
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheArchives" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized caches
+ @param conf Configuration that contains the localized archives
+ @return A path array of localized caches
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalCacheFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the path array of the localized files
+ @param conf Configuration that contains the localized files
+ @return A path array of localized files
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getArchiveTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the archives
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getFileTimestamps" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the timestamps of the files
+ @param conf The configuration which stored the timestamps
+ @return a string array of timestamps
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setArchiveTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamps of the archives to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of archives.
+ The order should be the same as the order in which the archives are added.]]>
+ </doc>
+ </method>
+ <method name="setFileTimestamps"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="timestamps" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This is to check the timestamps of the files to be localized
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of files.
+ The order should be the same as the order in which the files are added.]]>
+ </doc>
+ </method>
+ <method name="setLocalArchives"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized archives
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+ </doc>
+ </method>
+ <method name="setLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the conf to contain the location for localized files
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+ </doc>
+ </method>
+ <method name="addCacheArchive"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add an archive to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addCacheFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Add a file to be localized to the conf
+ @param uri The uri of the cache to be localized
+ @param conf Configuration to add the cache to]]>
+ </doc>
+ </method>
+ <method name="addFileToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a file path to the current set of classpath entries. It adds
+ the file to the cache as well.
+
+ @param file Path of the file to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getFileClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the file entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="addArchiveToClassPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="archive" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an archive path to the current set of classpath entries. It adds the
+ archive to cache as well.
+
+ @param archive Path of the archive to be added
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="getArchiveClassPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the archive entries in classpath as an array of Path
+
+ @param conf Configuration that contains the classpath setting]]>
+ </doc>
+ </method>
+ <method name="createSymlink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method allows you to create symlinks in the current working directory
+ of the task to all the cache files/archives
+ @param conf the jobconf]]>
+ </doc>
+ </method>
+ <method name="getSymlink" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This method checks to see if symlinks are to be created for the
+ localized cache files in the current working directory
+ @param conf the jobconf
+ @return true if symlinks are to be created, else false]]>
+ </doc>
+ </method>
+ <method name="checkURIs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uriFiles" type="java.net.URI[]"/>
+ <param name="uriArchives" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[This method checks if there is a conflict in the fragment names
+ of the uris. Also makes sure that each uri has a fragment. It
+ is only to be called if you want to create symlinks for
+ the various archives and files.
+ @param uriFiles the array of URIs for the files
+ @param uriArchives the array of URIs for the archives]]>
+ </doc>
+ </method>
+ <method name="purgeCache"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clear the entire contents of the cache and delete the backing files. This
+ should only be used when the server is reinitializing, because the users
+ are going to lose their files.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Distribute application-specific large, read-only files efficiently.
+
+ <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ framework to cache files (text, archives, jars etc.) needed by applications.
+ </p>
+
+ <p>Applications specify the files to be cached via urls (hdfs:// or http://)
+ in the {@link org.apache.hadoop.mapred.JobConf}.
+ The <code>DistributedCache</code> assumes that the
+ files specified via hdfs:// urls are already present on the
+ {@link FileSystem} at the path specified by the url.</p>
+
+ <p>The framework will copy the necessary files on to the slave node before
+ any tasks for the job are executed on that node. Its efficiency stems from
+ the fact that the files are only copied once per job and the ability to
+ cache archives which are un-archived on the slaves.</p>
+
+ <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ data/text files and/or more complex types such as archives, jars etc.
+ Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes.
+ Jars may be optionally added to the classpath of the tasks, a rudimentary
+ software distribution mechanism. Files have execution permissions.
+ Optionally users can also direct it to symlink the distributed cache file(s)
+ into the working directory of the task.</p>
+
+ <p><code>DistributedCache</code> tracks modification timestamps of the cache
+ files. Clearly the cache files should not be modified by the application
+ or externally while the job is executing.</p>
+
+ <p>Here is an illustrative example on how to use the
+ <code>DistributedCache</code>:</p>
+ <p><blockquote><pre>
+ // Setting up the cache for the application
+
+ 1. Copy the requisite files to the <code>FileSystem</code>:
+
+ $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
+ $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
+ $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+ $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
+ $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
+ $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
+
+ 2. Setup the application's <code>JobConf</code>:
+
+ JobConf job = new JobConf();
+ DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
+ job);
+ DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
+ DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz"), job);
+ DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz"), job);
+
+ 3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
+ or {@link org.apache.hadoop.mapred.Reducer}:
+
+ public static class MapClass extends MapReduceBase
+ implements Mapper&lt;K, V, K, V&gt; {
+
+ private Path[] localArchives;
+ private Path[] localFiles;
+
+ public void configure(JobConf job) {
+ // Get the cached archives/files
+ localArchives = DistributedCache.getLocalCacheArchives(job);
+ localFiles = DistributedCache.getLocalCacheFiles(job);
+ }
+
+ public void map(K key, V value,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Use data from the cached archives/files here
+ // ...
+ // ...
+ output.collect(k, v);
+ }
+ }
+
+ </pre></blockquote></p>
+
+ @see org.apache.hadoop.mapred.JobConf
+ @see org.apache.hadoop.mapred.JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.filecache.DistributedCache -->
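+ <!-- A complementary sketch to the example above: validating fragments before
+      asking for symlinks. The paths are illustrative:
+
+      import java.net.URI;
+      import org.apache.hadoop.filecache.DistributedCache;
+      import org.apache.hadoop.mapred.JobConf;
+
+      public class CacheSetupDemo {
+        public static void main(String[] args) throws Exception {
+          JobConf job = new JobConf();
+          URI[] files    = { new URI("/myapp/lookup.dat#lookup.dat") };
+          URI[] archives = { new URI("/myapp/map.zip#map") };
+          // Every URI needs a distinct fragment before symlinks can be made.
+          if (DistributedCache.checkURIs(files, archives)) {
+            DistributedCache.setCacheFiles(files, job);
+            DistributedCache.setCacheArchives(archives, job);
+            DistributedCache.createSymlink(job); // lookup.dat and map in the cwd
+          }
+        }
+      }
+ -->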
+</package>
+<package name="org.apache.hadoop.fs">
+ <!-- start class org.apache.hadoop.fs.BlockLocation -->
+ <class name="BlockLocation" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="BlockLocation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with host, name, offset and length]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], java.lang.String[], long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with host, name, network topology, offset and length]]>
+ </doc>
+ </constructor>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hosts (hostname) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of names (hostname:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="getTopologyPaths" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of network topology paths for each of the hosts.
+ The last component of the path is the host.]]>
+ </doc>
+ </method>
+ <method name="getOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the length of the block]]>
+ </doc>
+ </method>
+ <method name="setOffset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <doc>
+ <![CDATA[Set the start offset of file associated with this block]]>
+ </doc>
+ </method>
+ <method name="setLength"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="length" type="long"/>
+ <doc>
+ <![CDATA[Set the length of the block]]>
+ </doc>
+ </method>
+ <method name="setHosts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hosts" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the hosts hosting this block]]>
+ </doc>
+ </method>
+ <method name="setNames"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the names (host:port) hosting this block]]>
+ </doc>
+ </method>
+ <method name="setTopologyPaths"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="topologyPaths" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the network topology paths of the hosts]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement write of Writable]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement readFields of Writable]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BlockLocation -->
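+ <!-- usage sketch: a minimal Java example of querying BlockLocation data
+      through FileSystem.getFileBlockLocations; the path "/user/demo/data.txt"
+      is hypothetical.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.*;
+
+ public class ShowBlockLocations {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     FileSystem fs = FileSystem.get(conf);
+     Path p = new Path("/user/demo/data.txt");   // hypothetical path
+     FileStatus stat = fs.getFileStatus(p);
+     // One BlockLocation per block of the file in the range [0, len).
+     BlockLocation[] blocks = fs.getFileBlockLocations(stat, 0, stat.getLen());
+     for (BlockLocation b : blocks) {
+       System.out.println("offset=" + b.getOffset() + " length=" + b.getLength()
+           + " hosts=" + java.util.Arrays.toString(b.getHosts()));
+     }
+   }
+ }
+ -->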
+ <!-- start class org.apache.hadoop.fs.BufferedFSInputStream -->
+ <class name="BufferedFSInputStream" extends="java.io.BufferedInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="BufferedFSInputStream" type="org.apache.hadoop.fs.FSInputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a <code>BufferedFSInputStream</code>
+ with the specified buffer size,
+ and saves its argument, the input stream
+ <code>in</code>, for later use. An internal
+ buffer array of length <code>size</code>
+ is created and stored in <code>buf</code>.
+
+ @param in the underlying input stream.
+ @param size the buffer size.
+ @exception IllegalArgumentException if size <= 0.]]>
+ </doc>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A class that optimizes reading from FSInputStream by buffering]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.BufferedFSInputStream -->
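+ <!-- usage sketch: the Seekable / PositionedReadable contract that
+      BufferedFSInputStream implements is also exposed by FSDataInputStream,
+      so this hedged example exercises it via FileSystem.open; the path is
+      hypothetical.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.*;
+
+ public class PositionedReadDemo {
+   public static void main(String[] args) throws Exception {
+     FileSystem fs = FileSystem.get(new Configuration());
+     FSDataInputStream in = fs.open(new Path("/user/demo/data.txt")); // hypothetical
+     byte[] buf = new byte[128];
+     in.readFully(100L, buf);   // positioned read: does not move the stream
+     in.seek(0L);               // Seekable: reposition the stream
+     System.out.println("pos after seek = " + in.getPos());
+     in.close();
+   }
+ }
+ -->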
+ <!-- start class org.apache.hadoop.fs.ChecksumException -->
+ <class name="ChecksumException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumException" type="java.lang.String, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Thrown for checksum errors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumException -->
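+ <!-- usage sketch: catching a ChecksumException while reading through a
+      checksummed filesystem and using getPos() to report where the corruption
+      was detected; the path is hypothetical.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.*;
+
+ public class CatchChecksumError {
+   public static void main(String[] args) throws Exception {
+     LocalFileSystem fs = FileSystem.getLocal(new Configuration());
+     FSDataInputStream in = fs.open(new Path("/tmp/demo.txt")); // hypothetical
+     byte[] buf = new byte[4096];
+     try {
+       in.read(buf, 0, buf.length);
+     } catch (ChecksumException e) {
+       // getPos() reports the position of the bad data in the file.
+       System.err.println("checksum error at position " + e.getPos());
+     } finally {
+       in.close();
+     }
+   }
+ }
+ -->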
+ <!-- start class org.apache.hadoop.fs.ChecksumFileSystem -->
+ <class name="ChecksumFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ChecksumFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getApproxChkSumLength" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether to verify checksum.]]>
+ </doc>
+ </method>
+ <method name="getRawFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the raw file system]]>
+ </doc>
+ </method>
+ <method name="getChecksumFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return the name of the checksum file associated with a file.]]>
+ </doc>
+ </method>
+ <method name="isChecksumFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Return true iff file is a checksum file name.]]>
+ </doc>
+ </method>
+ <method name="getChecksumFileLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="fileSize" type="long"/>
+ <doc>
+ <![CDATA[Return the length of the checksum file given the size of the
+ actual file.]]>
+ </doc>
+ </method>
+ <method name="getBytesPerSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes per checksum]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getChecksumLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="long"/>
+ <param name="bytesPerSum" type="int"/>
+ <doc>
+ <![CDATA[Calculates the length of the checksum file in bytes.
+ @param size the length of the data file in bytes
+ @param bytesPerSum the number of bytes in a checksum block
+ @return the number of bytes in the checksum file]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+ Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Rename files/dirs]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Implement the delete(Path, boolean) in the checksum
+ file system.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under this FileSystem, and the dst is on the local disk.
+ Copy it from the FileSystem to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="copyCrc" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under this FileSystem, and the dst is on the local disk.
+ Copy it from the FileSystem to the local dst name.
+ If src and dst are directories, the copyCrc parameter
+ determines whether to copy CRC files.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Report a checksum error to the file system.
+ @param f the file name containing the error
+ @param in the stream open on the file
+ @param inPos the position of the beginning of the bad data in the file
+ @param sums the stream open on the checksum file
+ @param sumsPos the position of the beginning of the bad data in the checksum file
+ @return whether a retry is necessary]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract checksummed FileSystem.
+ It provides a basic implementation of a checksummed FileSystem,
+ which creates a checksum file for each raw file.
+ It generates and verifies checksums on the client side.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ChecksumFileSystem -->
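+ <!-- usage sketch: LocalFileSystem is a concrete ChecksumFileSystem in this
+      release, so a hedged example of the checksum-specific calls; the path is
+      hypothetical.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.*;
+
+ public class ChecksumFsDemo {
+   public static void main(String[] args) throws Exception {
+     LocalFileSystem fs = FileSystem.getLocal(new Configuration());
+     Path data = new Path("/tmp/demo.txt");   // hypothetical path
+     Path crc = fs.getChecksumFile(data);     // the ".demo.txt.crc" sibling
+     System.out.println("checksum file: " + crc
+         + " isChecksumFile=" + ChecksumFileSystem.isChecksumFile(crc));
+     System.out.println("raw fs: " + fs.getRawFileSystem().getUri());
+     fs.setVerifyChecksum(false);  // skip client-side verification on reads
+   }
+ }
+ -->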
+ <!-- start class org.apache.hadoop.fs.ContentSummary -->
+ <class name="ContentSummary" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ContentSummary"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="ContentSummary" type="long, long, long, long, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the length]]>
+ </doc>
+ </method>
+ <method name="getDirectoryCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the directory count]]>
+ </doc>
+ </method>
+ <method name="getFileCount" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the file count]]>
+ </doc>
+ </method>
+ <method name="getQuota" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the directory quota]]>
+ </doc>
+ </method>
+ <method name="getSpaceConsumed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns (disk) space consumed]]>
+ </doc>
+ </method>
+ <method name="getSpaceQuota" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns (disk) space quota]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the header of the output.
+ If qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the header of the output]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qOption" type="boolean"/>
+ <doc>
+ <![CDATA[Return the string representation of the object in the output format.
+ If qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the string representation of the object]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stores the summary of a directory or a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ContentSummary -->
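+ <!-- usage sketch: printing a ContentSummary in the documented output format,
+      assuming the summary is obtained via a FileSystem.getContentSummary call
+      (not shown in this diff hunk); the path is hypothetical.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.*;
+
+ public class ShowContentSummary {
+   public static void main(String[] args) throws Exception {
+     FileSystem fs = FileSystem.get(new Configuration());
+     ContentSummary cs = fs.getContentSummary(new Path("/user/demo")); // hypothetical
+     boolean qOption = true;  // include quota and remaining-quota columns
+     System.out.println(ContentSummary.getHeader(qOption));
+     System.out.println(cs.toString(qOption) + "/user/demo");
+   }
+ }
+ -->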
+ <!-- start class org.apache.hadoop.fs.DF -->
+ <class name="DF" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DF" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="DF" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFilesystem" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCapacity" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getAvailable" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPercentUsed" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMount" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="DF_INTERVAL_DEFAULT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'df' program.
+ Tested on Linux, FreeBSD, Cygwin.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DF -->
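+ <!-- usage sketch: querying disk space for a directory with DF; the directory
+      is hypothetical and the numbers come from the platform's 'df' command.
+
+ import java.io.File;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.DF;
+
+ public class DfDemo {
+   public static void main(String[] args) throws Exception {
+     DF df = new DF(new File("/tmp"), new Configuration());
+     System.out.println("filesystem: " + df.getFilesystem()
+         + " mounted on " + df.getMount());
+     System.out.println("capacity=" + df.getCapacity()
+         + " used=" + df.getUsed()
+         + " available=" + df.getAvailable()
+         + " percentUsed=" + df.getPercentUsed() + "%");
+   }
+ }
+ -->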
+ <!-- start class org.apache.hadoop.fs.DU -->
+ <class name="DU" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DU" type="java.io.File, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param interval refresh the disk usage at this interval
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <constructor name="DU" type="java.io.File, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Keeps track of disk usage.
+ @param path the path to check disk usage in
+ @param conf configuration object
+ @throws IOException if we fail to refresh the disk usage]]>
+ </doc>
+ </constructor>
+ <method name="decDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Decrease how much disk space we use.
+ @param value decrease by this value]]>
+ </doc>
+ </method>
+ <method name="incDfsUsed"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Increase how much disk space we use.
+ @param value increase by this value]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return disk space used
+ @throws IOException if the shell command fails]]>
+ </doc>
+ </method>
+ <method name="getDirPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the path whose disk usage is being tracked]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Start the disk usage checking thread.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Shut down the refreshing thread.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Filesystem disk space usage statistics. Uses the unix 'du' program.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.DU -->
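+ <!-- usage sketch: tracking disk usage of a directory with DU on a refresh
+      interval; the directory and the 30-second interval are hypothetical.
+
+ import java.io.File;
+ import org.apache.hadoop.fs.DU;
+
+ public class DuDemo {
+   public static void main(String[] args) throws Exception {
+     DU du = new DU(new File("/tmp"), 30000L);  // refresh every 30 seconds
+     du.start();                                // background refresh thread
+     try {
+       System.out.println(du.getDirPath() + " uses " + du.getUsed() + " bytes");
+     } finally {
+       du.shutdown();                           // stop the refresh thread
+     }
+   }
+ }
+ -->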
+ <!-- start class org.apache.hadoop.fs.FileChecksum -->
+ <class name="FileChecksum" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FileChecksum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAlgorithmName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The checksum algorithm name]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The length of the checksum in bytes]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value of the checksum in bytes]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true if both the algorithms and the values are the same.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An abstract class representing file checksums for files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileChecksum -->
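+ <!-- usage sketch: comparing two files by checksum, assuming the filesystem
+      supports a FileSystem.getFileChecksum call (not shown in this diff hunk;
+      it may return null when checksums are unavailable); paths are
+      hypothetical.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.*;
+
+ public class CompareChecksums {
+   public static void main(String[] args) throws Exception {
+     FileSystem fs = FileSystem.get(new Configuration());
+     FileChecksum a = fs.getFileChecksum(new Path("/user/demo/a.txt")); // hypothetical
+     FileChecksum b = fs.getFileChecksum(new Path("/user/demo/b.txt")); // hypothetical
+     if (a != null && b != null) {
+       // equals() is true only if both the algorithms and the values match.
+       System.out.println("algorithm=" + a.getAlgorithmName()
+           + " same=" + a.equals(b));
+     } else {
+       System.out.println("checksums not available on this filesystem");
+     }
+   }
+ }
+ -->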
+ <!-- start class org.apache.hadoop.fs.FileStatus -->
+ <class name="FileStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <constructor name="FileStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLen" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDir" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this a directory?
+ @return true if this is a directory]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the block size of the file.
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the replication factor of a file.
+ @return the replication factor of a file.]]>
+ </doc>
+ </method>
+ <method name="getModificationTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the modification time of the file.
+ @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getAccessTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the access time of the file.
+ @return the access time of file in milliseconds since January 1, 1970 UTC.]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get FsPermission associated with the file.
+ @return permission. If a filesystem does not have a notion of permissions,
+ or if permissions could not be determined, then the default
+ permission equivalent of "rwxrwxrwx" is returned.]]>
+ </doc>
+ </method>
+ <method name="getOwner" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the owner of the file.
+ @return owner of the file. The string could be empty if there is no
+ notion of owner of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getGroup" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the group associated with the file.
+ @return group for the file. The string could be empty if there is no
+ notion of group of a file in a filesystem or if it could not
+ be determined (rare).]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Sets permission.
+ @param permission if permission is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="owner" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets owner.
+ @param owner if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="setGroup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets group.
+ @param group if it is null, default value is set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare this object to another object
+
+ @param o the object to be compared.
+ @return a negative integer, zero, or a positive integer as this object
+ is less than, equal to, or greater than the specified object.
+
+ @throws ClassCastException if the specified object is not of
+ type FileStatus]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare this object to another object for equality.
+ @param o the object to be compared.
+ @return true if the two file statuses have the same path name; false if not.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for the object, which is defined as
+ the hash code of the path name.
+
+ @return a hash code value for the path name.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents the client-side information for a file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileStatus -->
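+ <!-- usage sketch: reading the fields of each FileStatus returned by
+      FileSystem.listStatus; the directory is hypothetical.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.*;
+
+ public class ListDir {
+   public static void main(String[] args) throws Exception {
+     FileSystem fs = FileSystem.get(new Configuration());
+     FileStatus[] stats = fs.listStatus(new Path("/user/demo")); // hypothetical
+     for (FileStatus s : stats) {
+       System.out.println((s.isDir() ? "d " : "- ")
+           + s.getPermission() + " " + s.getOwner() + " " + s.getGroup()
+           + " " + s.getLen() + " " + s.getModificationTime()
+           + " " + s.getPath());
+     }
+   }
+ }
+ -->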
+ <!-- start class org.apache.hadoop.fs.FileSystem -->
+ <class name="FileSystem" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="FileSystem"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the configured filesystem implementation.]]>
+ </doc>
+ </method>
+ <method name="getDefaultUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default filesystem URI from a configuration.
+ @param conf the configuration to access
+ @return the uri of the default filesystem]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.net.URI"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="setDefaultUri"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the default filesystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="getNamed" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="call #get(URI,Configuration) instead.">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated call #get(URI,Configuration) instead.]]>
+ </doc>
+ </method>
+ <method name="getLocal" return="org.apache.hadoop.fs.LocalFileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the local file system.
+ @param conf the configuration to configure the file system with
+ @return a LocalFileSystem]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the FileSystem for this URI's scheme and authority. The scheme
+ of the URI determines a configuration property name,
+ <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
+ The entire URI is passed to the FileSystem instance's initialize method.]]>
+ </doc>
+ </method>
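+ <!-- usage sketch: resolving a FileSystem from a URI; the hdfs://namenode:9000
+      authority is hypothetical, and the URI scheme selects the implementation
+      class as documented above.
+
+ import java.net.URI;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+
+ public class GetFs {
+   public static void main(String[] args) throws Exception {
+     Configuration conf = new Configuration();
+     // The "hdfs" scheme is mapped to a FileSystem class via configuration.
+     FileSystem hdfs = FileSystem.get(URI.create("hdfs://namenode:9000/"), conf);
+     FileSystem local = FileSystem.getLocal(conf);
+     System.out.println("remote: " + hdfs.getUri() + ", local: " + local.getUri());
+   }
+ }
+ -->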
+ <method name="closeAll"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all cached filesystems. Be sure those filesystems are not
+ used anymore.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a file with the provided permission.
+ The permission of the file is set to the provided permission, as in
+ setPermission, not permission&~umask.
+
+ It is implemented using two RPCs. This is understood to be inefficient,
+ but the implementation is thread-safe. The other option is to change the
+ value of umask in the configuration to 0, but that is not thread-safe.
+
+ @param fs file system handle
+ @param file the name of the file to be created
+ @param permission the permission of the file
+ @return an output stream
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a directory with the provided permission.
+ The permission of the directory is set to the provided permission, as in
+ setPermission, not permission&~umask.
+
+ @see #create(FileSystem, Path, FsPermission)
+
+ @param fs file system handle
+ @param dir the name of the directory to be created
+ @param permission the permission of the directory
+ @return true if the directory creation succeeds; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array containing hostnames, offset and size of
+ portions of the given file. For a nonexistent
+ file or regions, null will be returned.
+
+ This call is most helpful with DFS, where it returns
+ hostnames of machines that contain the given file.
+
+ The base FileSystem implementation simply returns an element containing 'localhost'.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file to open]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param permission
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize
+ @param progress
+ @throws IOException
+ @see #setPermission(Path, FsPermission)]]>
+ </doc>
+ </method>
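+ <!-- usage sketch: writing a file with explicit overwrite, buffer size,
+      replication, and block size through one of the create overloads above;
+      the path and sizes are hypothetical.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.*;
+
+ public class CreateDemo {
+   public static void main(String[] args) throws Exception {
+     FileSystem fs = FileSystem.get(new Configuration());
+     Path p = new Path("/user/demo/out.txt");  // hypothetical path
+     FSDataOutputStream out = fs.create(p,
+         true,                // overwrite if the file already exists
+         4096,                // buffer size
+         (short) 3,           // replication
+         64L * 1024 * 1024);  // block size: 64 MB
+     out.writeBytes("hello\n");
+     out.close();
+   }
+ }
+ -->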
+ <method name="createNewFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the given Path as a brand-new zero-length file. Returns
+ false if creation fails or the file already existed.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, getConf().getInt("io.file.buffer.size", 4096), null)
+ @param f the existing file to be appended.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, bufferSize, null).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append to an existing file (optional operation).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @param progress for reporting progress if it is not null.
+ @throws IOException]]>
+ </doc>
+ </method>
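+ <!-- usage sketch: append is an optional operation, so this hedged example
+      assumes the underlying filesystem supports it (it may throw IOException
+      otherwise); the path is hypothetical.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.*;
+
+ public class AppendDemo {
+   public static void main(String[] args) throws Exception {
+     FileSystem fs = FileSystem.get(new Configuration());
+     FSDataOutputStream out = fs.append(new Path("/user/demo/log.txt")); // hypothetical
+     out.writeBytes("one more line\n");
+     out.close();
+   }
+ }
+ -->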
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get replication.
+
+ @deprecated Use getFileStatus() instead
+ @param src file name
+ @return file replication
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file.
+
+ @param f the path to delete.
+ @param recursive if the path is a directory and recursive is set to
+ true, the directory is deleted; otherwise an exception is thrown. In
+ the case of a file, recursive can be set to either true or false.
+ @return true if the delete is successful, else false.
+ @throws IOException]]>
+ </doc>
+ </method>
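+ <!-- A minimal usage sketch of a recursive delete; the directory name is
+      hypothetical, and imports from org.apache.hadoop.conf and
+      org.apache.hadoop.fs are assumed.
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        // true: if /tmp/scratch is a directory, delete its contents too
+        boolean deleted = fs.delete(new Path("/tmp/scratch"), true);
+ -->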
+ <method name="deleteOnExit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a path to be deleted when FileSystem is closed.
+ When the JVM shuts down,
+ all FileSystem objects will be closed automatically.
+ Then,
+ the marked path will be deleted as a result of closing the FileSystem.
+
+ The path has to exist in the file system.
+
+ @param f the path to delete.
+ @return true if deleteOnExit is successful, otherwise false.
+ @throws IOException]]>
+ </doc>
+ </method>
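+ <!-- A short sketch of delete-on-exit with a hypothetical scratch path; as noted
+      above, the path must already exist when deleteOnExit is called.
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        Path scratch = new Path("/tmp/job-scratch");    // hypothetical, must exist
+        fs.deleteOnExit(scratch);                       // removed when fs is closed
+ -->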
+ <method name="processDeleteOnExit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Delete all files that were marked as delete-on-exit. This recursively
+ deletes all files in the specified paths.]]>
+ </doc>
+ </method>
+ <method name="exists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check whether the given path exists.
+ @param f the source path]]>
+ </doc>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[True iff the named path is a regular file.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the {@link ContentSummary} of a given {@link Path}.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+
+ @param f
+ given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given path using the user-supplied path
+ filter.
+
+ @param f
+ a path name
+ @param filter
+ the user-supplied path filter
+ @return an array of FileStatus objects for the files under the given path
+ after applying the filter
+ @throws IOException
+ if any problem is encountered while fetching the status]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using the
+ default path filter.
+
+ @param files
+ a list of paths
+ @return a list of statuses for the files under the given paths after
+ applying the default path filter
+ @exception IOException]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Filter files/directories in the given list of paths using user-supplied
+ path filter.
+
+ @param files
+ a list of paths
+ @param filter
+ the user-supplied path filter
+ @return a list of statuses for the files under the given paths after
+ applying the filter
+ @exception IOException]]>
+ </doc>
+ </method>
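+ <!-- A sketch of the filtered listing above, with PathFilter implemented inline;
+      the /data directory is hypothetical.
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        FileStatus[] txt = fs.listStatus(new Path("/data"), new PathFilter() {
+          public boolean accept(Path p) {
+            return p.getName().endsWith(".txt");        // keep only .txt entries
+          }
+        });
+ -->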
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Return all the files that match pathPattern and are not checksum
+ files. Results are sorted by their names.
+
+ <p>
+ A filename pattern is composed of <i>regular</i> characters and
+ <i>special pattern matching</i> characters, which are:
+
+ <dl>
+ <dd>
+ <dl>
+ <p>
+ <dt> <tt> ? </tt>
+ <dd> Matches any single character.
+
+ <p>
+ <dt> <tt> * </tt>
+ <dd> Matches zero or more characters.
+
+ <p>
+ <dt> <tt> [<i>abc</i>] </tt>
+ <dd> Matches a single character from character set
+ <tt>{<i>a,b,c</i>}</tt>.
+
+ <p>
+ <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
+ <dd> Matches a single character from the character range
+ <tt>{<i>a...b</i>}</tt>. Note that character <tt><i>a</i></tt> must be
+ lexicographically less than or equal to character <tt><i>b</i></tt>.
+
+ <p>
+ <dt> <tt> [^<i>a</i>] </tt>
+ <dd> Matches a single character that is not from character set or range
+ <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur
+ immediately to the right of the opening bracket.
+
+ <p>
+ <dt> <tt> \<i>c</i> </tt>
+ <dd> Removes (escapes) any special meaning of character <i>c</i>.
+
+ <p>
+ <dt> <tt> {ab,cd} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt>
+
+ <p>
+ <dt> <tt> {ab,c{de,fh}} </tt>
+ <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt>
+
+ </dl>
+ </dd>
+ </dl>
+
+ @param pathPattern a regular expression specifying a path pattern
+
+ @return an array of paths that match the path pattern
+ @throws IOException]]>
+ </doc>
+ </method>
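+ <!-- A sketch of glob expansion over hypothetical dated directories. Per the
+      two-argument variant below, a null result means the pattern had no glob
+      and the path does not exist.
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        FileStatus[] matches = fs.globStatus(new Path("/logs/2010-11-*/part-*"));
+        if (matches != null) {
+          for (FileStatus s : matches) {
+            System.out.println(s.getPath());
+          }
+        }
+ -->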
+ <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array of FileStatus objects whose path names match pathPattern
+ and are accepted by the user-supplied path filter. Results are sorted by
+ their path names.
+ Return null if pathPattern has no glob and the path does not exist.
+ Return an empty array if pathPattern has a glob and no path matches it.
+
+ @param pathPattern
+ a regular expression specifying the path pattern
+ @param filter
+ a user-supplied path filter
+ @return an array of FileStatus objects
+ @throws IOException if any I/O error occurs when fetching file status]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the current user's home directory in this filesystem.
+ The default implementation returns "/user/$USER/".]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param new_dir the new working directory]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Call {@link #mkdirs(Path, FsPermission)} with default permission.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make the given file and all non-existent parents into
+ directories. Has the semantics of Unix 'mkdir -p'.
+ Existence of the directory hierarchy is not an error.]]>
+ </doc>
+ </method>
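+ <!-- A sketch of mkdirs with an explicit permission; like 'mkdir -p', existing
+      parents are not an error. The path is hypothetical.
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        FsPermission perm = new FsPermission((short) 0755);   // rwxr-xr-x
+        fs.mkdirs(new Path("/user/alice/output"), perm);
+ -->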
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to the FS at
+ the given dst name; the source is kept intact afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add them to the FS at
+ the given dst name, removing the sources afterwards.]]>
+ </doc>
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to the FS at
+ the given dst name, removing the source afterwards.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to the FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src files are on the local disk. Add them to the FS at
+ the given dst name.
+ delSrc indicates if the sources should be removed]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to the FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
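+ <!-- A sketch of staging a local file into the FS while keeping the local copy;
+      both paths are hypothetical.
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        fs.copyFromLocalFile(false /* delSrc */, true /* overwrite */,
+                             new Path("file:///tmp/input.csv"),
+                             new Path("/staging/input.csv"));
+ -->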
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+ </doc>
+ </method>
+ <method name="moveToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ Remove the source afterwards]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
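+ <!-- A sketch of the two-phase output protocol formed by startLocalOutput and
+      completeLocalOutput above; paths are hypothetical. Writes go to the Path
+      returned by startLocalOutput, and completeLocalOutput publishes them to the
+      FS target.
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        Path target = new Path("/results/part-00000");
+        Path tmp    = new Path("file:///tmp/part-00000.tmp");
+        Path local  = fs.startLocalOutput(target, tmp);   // where to actually write
+        // ... write to 'local' with ordinary java.io streams ...
+        fs.completeLocalOutput(target, tmp);   // no-op locally, copy for a remote FS
+ -->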
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[No more filesystem operations are needed. Will
+ release any held locks.]]>
+ </doc>
+ </method>
+ <method name="getUsed" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the total size of all files in the filesystem.]]>
+ </doc>
+ </method>
+ <method name="getBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use getFileStatus() instead">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use getFileStatus() instead]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should
+ optimally be split into to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a file status object that represents the path.
+ @param f The path we want information from
+ @return a FileStatus object
+ @throws FileNotFoundException when the path does not exist;
+ IOException see specific implementation]]>
+ </doc>
+ </method>
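+ <!-- A sketch of reading basic metadata through FileStatus; the path is
+      hypothetical.
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        FileStatus st = fs.getFileStatus(new Path("/data/file.bin"));
+        System.out.println(st.getLen() + " bytes, replication " + st.getReplication());
+ -->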
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the checksum of a file.
+
+ @param f The file path
+ @return The file checksum. The default return value is null,
+ which indicates that no checksum algorithm is implemented
+ in the corresponding FileSystem.]]>
+ </doc>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ <doc>
+ <![CDATA[Set the verify checksum flag. This is only applicable if the
+ corresponding FileSystem supports checksums. By default this does nothing.
+ @param verifyChecksum whether checksums should be verified]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set permission of a path.
+ @param p the path
+ @param permission the permission to set]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param p The path
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.]]>
+ </doc>
+ </method>
+ <method name="setTimes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="mtime" type="long"/>
+ <param name="atime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the modification time and/or access time of a file.
+ @param p The path
+ @param mtime Set the modification time of this file.
+ The number of milliseconds since Jan 1, 1970.
+ A value of -1 means that this call should not set modification time.
+ @param atime Set the access time of this file.
+ The number of milliseconds since Jan 1, 1970.
+ A value of -1 means that this call should not set access time.]]>
+ </doc>
+ </method>
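+ <!-- A sketch that updates only the modification time; passing -1 for atime
+      leaves the access time unchanged, as documented above. The path is
+      hypothetical.
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        fs.setTimes(new Path("/data/file.bin"), System.currentTimeMillis(), -1L);
+ -->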
+ <method name="getStatistics" return="java.util.Map"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="use {@link #getAllStatistics} instead">
+ <doc>
+ <![CDATA[Get the Map of Statistics object indexed by URI Scheme.
+ @return a Map having a key as URI scheme and value as Statistics object
+ @deprecated use {@link #getAllStatistics} instead]]>
+ </doc>
+ </method>
+ <method name="getAllStatistics" return="java.util.List"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the FileSystem classes that have Statistics]]>
+ </doc>
+ </method>
+ <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scheme" type="java.lang.String"/>
+ <param name="cls" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the statistics for a particular file system.
+ @param scheme the URI scheme of the file system
+ @param cls the class to lookup
+ @return a statistics object]]>
+ </doc>
+ </method>
+ <method name="clearStatistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="printStatistics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
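+ <!-- A sketch that dumps the per-scheme I/O counters collected in the Statistics
+      objects described below; it assumes getAllStatistics returns a typed
+      List of FileSystem.Statistics.
+
+        for (FileSystem.Statistics s : FileSystem.getAllStatistics()) {
+          System.out.println(s.getScheme() + ": read=" + s.getBytesRead()
+              + " written=" + s.getBytesWritten());
+        }
+ -->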
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="statistics" type="org.apache.hadoop.fs.FileSystem.Statistics"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The statistics for this file system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An abstract base class for a fairly generic filesystem. It
+ may be implemented as a distributed filesystem, or as a "local"
+ one that reflects the locally-connected disk. The local version
+ exists for small Hadoop instances and for testing.
+
+ <p>
+
+ All user code that may potentially use the Hadoop Distributed
+ File System should be written to use a FileSystem object. The
+ Hadoop DFS is a multi-machine system that appears as a single
+ disk. It's useful because of its fault tolerance and potentially
+ very large capacity.
+
+ <p>
+ The local implementation is {@link LocalFileSystem} and the distributed
+ implementation is DistributedFileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem -->
+ <!-- start class org.apache.hadoop.fs.FileSystem.Statistics -->
+ <class name="FileSystem.Statistics" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileSystem.Statistics" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="incrementBytesRead"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes read in the statistics
+ @param newBytes the additional bytes read]]>
+ </doc>
+ </method>
+ <method name="incrementBytesWritten"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newBytes" type="long"/>
+ <doc>
+ <![CDATA[Increment the bytes written in the statistics
+ @param newBytes the additional bytes written]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes read
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total number of bytes written
+ @return the number of bytes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the counts of bytes to 0.]]>
+ </doc>
+ </method>
+ <method name="getScheme" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the uri scheme associated with this statistics object.
+ @return the scheme associated with this set of statistics]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileSystem.Statistics -->
+ <!-- start class org.apache.hadoop.fs.FileUtil -->
+ <class name="FileUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus to an array of Path.
+
+ @param stats
+ an array of FileStatus objects
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
+ <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert an array of FileStatus to an array of Path.
+ If stats is null, return path.
+ @param stats
+ an array of FileStatus objects
+ @param path
+ the default path to return if stats is null
+ @return an array of paths corresponding to the input]]>
+ </doc>
+ </method>
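+ <!-- A sketch combining FileSystem.listStatus with stat2Paths to get plain
+      paths; /data is hypothetical.
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        Path[] paths = FileUtil.stat2Paths(fs.listStatus(new Path("/data")));
+ -->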
+ <method name="fullyDelete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a directory and all its contents. If
+ we return false, the directory may be partially-deleted.]]>
+ </doc>
+ </method>
+ <method name="fullyDelete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link FileSystem#delete(Path, boolean)}">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recursively delete a directory.
+
+ @param fs {@link FileSystem} on which the path is present
+ @param dir directory to recursively delete
+ @throws IOException
+ @deprecated Use {@link FileSystem#delete(Path, boolean)}]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy files between FileSystems.]]>
+ </doc>
+ </method>
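+ <!-- A sketch of a cross-filesystem copy, pulling a hypothetical FS path down
+      to the local filesystem with overwrite enabled.
+
+        Configuration conf = new Configuration();
+        FileSystem srcFs = FileSystem.get(conf);
+        FileSystem dstFs = FileSystem.getLocal(conf);
+        FileUtil.copy(srcFs, new Path("/data/in"), dstFs, new Path("/tmp/out"),
+                      false /* deleteSource */, true /* overwrite */, conf);
+ -->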
+ <method name="copyMerge" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="srcDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dstFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="addString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy all files in a directory to one output file (merge).]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy local files to a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="copy" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="java.io.File"/>
+ <param name="deleteSource" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy FileSystem files to local files.]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param filename The filename to convert
+ @return The unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @return The unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="makeShellPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <param name="makeCanonicalPath" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Convert an OS-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @param makeCanonicalPath
+ Whether to make canonical path for the file passed
+ @return The unix pathname
+ @throws IOException on Windows, there can be problems with the subprocess]]>
+ </doc>
+ </method>
+ <method name="getDU" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Takes an input dir and returns the disk usage (du) of that local
+ directory. Very basic implementation.
+
+ @param dir
+ the input dir whose local disk space is to be computed
+ @return The total disk space of the input local directory]]>
+ </doc>
+ </method>
+ <method name="unZip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="unzipDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a zip file as input, unzip it into the unzip directory
+ passed as the second parameter.
+ @param inFile The zip file as input
+ @param unzipDir The unzip directory into which the zip file is extracted.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="unTar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="java.io.File"/>
+ <param name="untarDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Given a tar file as input, untar it into the untar directory
+ passed as the second parameter.
+
+ This utility will untar ".tar" files and ".tar.gz"/".tgz" files.
+
+ @param inFile The tar file as input.
+ @param untarDir The untar directory into which the tar file is extracted.
+ @throws IOException]]>
+ </doc>
+ </method>
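+ <!-- A sketch unpacking a hypothetical local tarball into a local directory.
+
+        FileUtil.unTar(new java.io.File("/tmp/archive.tar.gz"),
+                       new java.io.File("/tmp/unpacked"));
+ -->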
+ <method name="symLink" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="linkname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a soft link between a src and destination
+ only on a local disk. HDFS does not support this operation.
+ @param target the target for symlink
+ @param linkname the symlink
+ @return value returned by the command]]>
+ </doc>
+ </method>
+ <method name="chmod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="perm" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Change the permissions on a filename.
+ @param filename the name of the file to change
+ @param perm the permission string
+ @return the exit code from the command
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
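+ <!-- A sketch of the shell-backed helpers above; paths are hypothetical and
+      local only. Both calls return the command's exit code, and chmod can also
+      throw InterruptedException.
+
+        int lnRc = FileUtil.symLink("/tmp/data.bin", "/tmp/data.link");
+        int chRc = FileUtil.chmod("/tmp/data.link", "644");
+ -->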
+ <method name="createLocalTempFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="basefile" type="java.io.File"/>
+ <param name="prefix" type="java.lang.String"/>
+ <param name="isDeleteOnExit" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a tmp file for a base file.
+ @param basefile the base file of the tmp
+ @param prefix file name prefix of tmp
+ @param isDeleteOnExit if true, the tmp will be deleted when the VM exits
+ @return a newly created tmp file
+ @exception IOException If a tmp file cannot be created
+ @see java.io.File#createTempFile(String, String, File)
+ @see java.io.File#deleteOnExit()]]>
+ </doc>
+ </method>
+ <method name="replaceFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.io.File"/>
+ <param name="target" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move the src file to the name specified by target.
+ @param src the source file
+ @param target the target file
+ @exception IOException If this operation fails]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of file-processing utility methods.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil -->
+ <!-- start class org.apache.hadoop.fs.FileUtil.HardLink -->
+ <class name="FileUtil.HardLink" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileUtil.HardLink"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createHardLink"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.io.File"/>
+ <param name="linkName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a hardlink]]>
+ </doc>
+ </method>
+ <method name="getLinkCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileName" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Retrieves the number of links to the specified file.]]>
+ </doc>
+ </method>
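+ <!-- A sketch hard-linking a hypothetical local file and checking its link
+      count afterwards.
+
+        java.io.File target = new java.io.File("/tmp/a.dat");    // hypothetical
+        java.io.File link   = new java.io.File("/tmp/a.hardlink");
+        FileUtil.HardLink.createHardLink(target, link);
+        int links = FileUtil.HardLink.getLinkCount(target);      // 2 after the call
+ -->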
+ <doc>
+ <![CDATA[Class for creating hardlinks.
+ Supports Unix, Cygwin, and Windows XP.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FileUtil.HardLink -->
+ <!-- start class org.apache.hadoop.fs.FilterFileSystem -->
+ <class name="FilterFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FilterFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+ for this FileSystem
+ @param conf the configuration]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="call #getUri() instead.">
+ <doc>
+ <![CDATA[@deprecated call #getUri() instead.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Make sure that a path specifies a FileSystem.]]>
+ </doc>
+ </method>
+ <method name="checkPath"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Check that a Path belongs to this FileSystem.]]>
+ </doc>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set replication for an existing file.
+
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+ false if file does not exist or is a directory]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames Path src to Path dst. Can take place on local fs
+ or remote DFS.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete a file]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List files in a directory.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+
+ @param newDir the new working directory]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the given file system
+
+ @return the directory pathname]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is on the local disk. Add it to the FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a local File that the user can write output to. The caller
+ provides both the eventual FS target name and the local working
+ file. If the FS is local, we write directly into the target. If
+ the FS is remote, we write into the tmp local area.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when we're all done writing to the target. A local FS will
+ do nothing, because we've written to exactly the right place. A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+ </doc>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of bytes that large input files should
+ optimally be split into to minimize I/O time.]]>
+ </doc>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default replication.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get file status.]]>
+ </doc>
+ </method>
+ <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setVerifyChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A <code>FilterFileSystem</code> contains
+ some other file system, which it uses as
+ its basic file system, possibly transforming
+ the data along the way or providing additional
+ functionality. The class <code>FilterFileSystem</code>
+ itself simply overrides all methods of
+ <code>FileSystem</code> with versions that
+ pass all requests to the contained file
+ system. Subclasses of <code>FilterFileSystem</code>
+ may further override some of these methods
+ and may also provide additional methods
+ and fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FilterFileSystem -->
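+ <!-- Editor's sketch (illustrative, not jdiff output): a minimal
+      FilterFileSystem subclass showing the delegation pattern described
+      above. LoggingFileSystem is a hypothetical name; the sketch assumes
+      FilterFileSystem's public no-argument constructor and uses the
+      protected "fs" field listed in this entry.
+
+      import java.io.IOException;
+      import org.apache.hadoop.fs.FSDataInputStream;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.FilterFileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class LoggingFileSystem extends FilterFileSystem {
+        public LoggingFileSystem(FileSystem underlying) {
+          this.fs = underlying;           // protected field documented above
+        }
+        public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+          System.err.println("open: " + f);
+          return fs.open(f, bufferSize);  // pass the request to the contained FS
+        }
+      }
+ -->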
+ <!-- start class org.apache.hadoop.fs.FSDataInputStream -->
+ <class name="FSDataInputStream" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSDataInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="desired" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
+ and buffers input through a {@link BufferedInputStream}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataInputStream -->
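+ <!-- Editor's sketch (illustrative, not jdiff output): the positioned
+      read(position, buffer, offset, length) and readFully variants above do
+      not move the stream cursor, while seek() does. The path below is made
+      up; any FileSystem reachable from the Configuration will do.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FSDataInputStream;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class PreadDemo {
+        public static void main(String[] args) throws Exception {
+          FileSystem fs = FileSystem.get(new Configuration());
+          FSDataInputStream in = fs.open(new Path("/tmp/data.bin"));
+          byte[] buf = new byte[16];
+          in.readFully(128L, buf);  // positioned read: fills buf from offset 128
+          long pos = in.getPos();   // unchanged by the positioned read
+          in.seek(128L);            // seek() does move the stream position
+          int n = in.read(buf, 0, buf.length);
+          System.out.println("pos=" + pos + ", read " + n + " bytes");
+          in.close();
+        }
+      }
+ -->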
+ <!-- start class org.apache.hadoop.fs.FSDataOutputStream -->
+ <class name="FSDataOutputStream" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Syncable"/>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWrappedStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility that wraps a {@link OutputStream} in a {@link DataOutputStream},
+ buffers output through a {@link BufferedOutputStream} and creates a checksum
+ file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSDataOutputStream -->
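+ <!-- Editor's sketch (illustrative, not jdiff output): getPos() reports the
+      bytes written so far, and sync() pushes buffered bytes to the
+      filesystem via the Syncable interface. The output path is made up.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FSDataOutputStream;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class WriteDemo {
+        public static void main(String[] args) throws Exception {
+          FileSystem fs = FileSystem.get(new Configuration());
+          FSDataOutputStream out = fs.create(new Path("/tmp/out.bin"));
+          out.writeInt(42);             // inherited from java.io.DataOutputStream
+          long written = out.getPos();  // 4 bytes written at this point
+          out.sync();                   // Syncable: flush buffered data
+          out.close();
+          System.out.println("wrote " + written + " bytes");
+        }
+      }
+ -->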
+ <!-- start class org.apache.hadoop.fs.FSError -->
+ <class name="FSError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Thrown for unexpected filesystem errors, presumed to reflect disk errors
+ in the native filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSError -->
+ <!-- start class org.apache.hadoop.fs.FSInputChecker -->
+ <class name="FSInputChecker" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs]]>
+ </doc>
+ </constructor>
+ <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int, boolean, java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param file The name of the file to be read
+ @param numOfRetries Number of read retries when ChecksumError occurs
+ @param sum the type of Checksum engine
+ @param chunkSize maximum chunk size
+ @param checksumSize the number of bytes per checksum]]>
+ </doc>
+ </constructor>
+ <method name="readChunk" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads in the next checksum chunk of data into <code>buf</code> at <code>offset</code>
+ and checksum into <code>checksum</code>.
+ The method is used for implementing read; therefore, it should be optimized
+ for sequential reading.
+ @param pos chunkPos
+ @param buf destination buffer
+ @param offset offset in buf at which to store data
+ @param len maximum number of bytes to read
+ @return number of bytes read]]>
+ </doc>
+ </method>
+ <method name="getChunkPosition" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <doc>
+ <![CDATA[Return position of beginning of chunk containing pos.
+
+ @param pos a position in the file
+ @return the starting position of the chunk which contains the byte]]>
+ </doc>
+ </method>
+ <method name="needChecksum" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if there is a need for checksum verification]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read one checksum-verified byte
+
+ @return the next byte of data, or <code>-1</code> if the end of the
+ stream is reached.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read checksum verified bytes from this byte-input stream into
+ the specified byte array, starting at the given offset.
+
+ <p> This method implements the general contract of the corresponding
+ <code>{@link InputStream#read(byte[], int, int) read}</code> method of
+ the <code>{@link InputStream}</code> class. As an additional
+ convenience, it attempts to read as many bytes as possible by repeatedly
+ invoking the <code>read</code> method of the underlying stream. This
+ iterated <code>read</code> continues until one of the following
+ conditions becomes true: <ul>
+
+ <li> The specified number of bytes have been read,
+
+ <li> The <code>read</code> method of the underlying stream returns
+ <code>-1</code>, indicating end-of-file.
+
+ </ul> If the first <code>read</code> on the underlying stream returns
+ <code>-1</code> to indicate end-of-file then this method returns
+ <code>-1</code>. Otherwise this method returns the number of bytes
+ actually read.
+
+ @param b destination buffer.
+ @param off offset at which to start storing bytes.
+ @param len maximum number of bytes to read.
+ @return the number of bytes read, or <code>-1</code> if the end of
+ the stream has been reached.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if any checksum error occurs]]>
+ </doc>
+ </method>
+ <method name="checksum2long" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="checksum" type="byte[]"/>
+ <doc>
+ <![CDATA[Convert a checksum byte array to a long]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over and discards <code>n</code> bytes of data from the
+ input stream.
+
+ <p>This method may skip more bytes than are remaining in the backing
+ file. This produces no exception and the number of bytes skipped
+ may include some number of bytes that were beyond the EOF of the
+ backing file. Attempting to read from the stream after skipping past
+ the end will result in -1 indicating the end of the file.
+
+<p>If <code>n</code> is negative, no bytes are skipped.
+
+ @param n the number of bytes to be skipped.
+ @return the actual number of bytes skipped.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to skip to is corrupted]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given position in the stream.
+ The next read() will be from that position.
+
+ <p>This method may seek past the end of the file.
+ This produces no exception and an attempt to read from
+ the stream will result in -1 indicating the end of the file.
+
+ @param pos the position to seek to.
+ @exception IOException if an I/O error occurs.
+ ChecksumException if the chunk to seek to is corrupted]]>
+ </doc>
+ </method>
+ <method name="readFully" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="stm" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A utility function that tries to read up to <code>len</code> bytes from
+ <code>stm</code>
+
+ @param stm an input stream
+ @param buf destination buffer
+ @param offset offset at which to store data
+ @param len number of bytes to read
+ @return actual number of bytes read
+ @throws IOException if there is any IO error]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="verifyChecksum" type="boolean"/>
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="maxChunkSize" type="int"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Set the checksum related parameters
+ @param verifyChecksum whether to verify checksum
+ @param sum which type of checksum to use
+ @param maxChunkSize maximum chunk size
+ @param checksumSize checksum size]]>
+ </doc>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="readlimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="file" type="org.apache.hadoop.fs.Path"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the file from which data is read]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This is a generic input stream for verifying checksums for
+ data before it is read by a user.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputChecker -->
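+ <!-- Editor's sketch (illustrative, not jdiff output): round-tripping a
+      CRC32 value through the static checksum2long helper above. The sketch
+      assumes the byte array is the checksum's big-endian encoding.
+
+      import java.util.zip.CRC32;
+      import org.apache.hadoop.fs.FSInputChecker;
+
+      public class ChecksumDemo {
+        public static void main(String[] args) {
+          CRC32 crc = new CRC32();
+          crc.update(new byte[]{1, 2, 3}, 0, 3);
+          long v = crc.getValue();
+          byte[] cs = {  // 4-byte big-endian encoding (assumed layout)
+            (byte)(v >>> 24), (byte)(v >>> 16), (byte)(v >>> 8), (byte)v
+          };
+          long back = FSInputChecker.checksum2long(cs);
+          System.out.println(v == back);  // true under that assumption
+        }
+      }
+ -->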
+ <!-- start class org.apache.hadoop.fs.FSInputStream -->
+ <class name="FSInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.Seekable"/>
+ <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+ <constructor name="FSInputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="seek"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seeks a different copy of the data. Returns true if
+ found a new source, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[FSInputStream is a generic old InputStream with a little bit
+ of RAF-style seek ability.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSInputStream -->
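+ <!-- Editor's sketch (illustrative, not jdiff output): a minimal concrete
+      FSInputStream over a byte array, showing the seek/getPos contract.
+      BytesInputStream is a hypothetical name; since FSInputStream
+      implements Seekable and PositionedReadable, an instance can also be
+      wrapped in an FSDataInputStream.
+
+      import java.io.IOException;
+      import org.apache.hadoop.fs.FSInputStream;
+
+      public class BytesInputStream extends FSInputStream {
+        private final byte[] data;
+        private int pos;
+        public BytesInputStream(byte[] data) { this.data = data; }
+        public void seek(long p) throws IOException {
+          if (p < 0 || p > data.length)   // can't seek past the end
+            throw new IOException("cannot seek to " + p);
+          pos = (int) p;
+        }
+        public long getPos() { return pos; }
+        public boolean seekToNewSource(long targetPos) { return false; }
+        public int read() {
+          return pos < data.length ? (data[pos++] & 0xff) : -1;
+        }
+      }
+ -->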
+ <!-- start class org.apache.hadoop.fs.FSOutputSummer -->
+ <class name="FSOutputSummer" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FSOutputSummer" type="java.util.zip.Checksum, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="writeChunk"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <param name="checksum" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write one byte]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes <code>len</code> bytes from the specified byte array
+ starting at offset <code>off</code> and generates a checksum for
+ each data chunk.
+
+ <p> This method stores bytes from the given array into this
+ stream's buffer before it gets checksummed. The buffer gets checksummed
+ and flushed to the underlying output stream when all the data
+ in a checksum chunk is in the buffer. If the buffer is empty and the
+ requested length is at least as large as the next checksum chunk
+ size, this method will checksum and write the chunk directly
+ to the underlying output stream. Thus it avoids unnecessary data copying.
+
+ @param b the data.
+ @param off the start offset in the data.
+ @param len the number of bytes to write.
+ @exception IOException if an I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flushBuffer"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="convertToByteStream" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sum" type="java.util.zip.Checksum"/>
+ <param name="checksumSize" type="int"/>
+ <doc>
+ <![CDATA[Converts a checksum integer value to a byte stream]]>
+ </doc>
+ </method>
+ <method name="resetChecksumChunk"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Resets existing buffer with a new one of the specified size.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is a generic output stream for generating checksums for
+ data before it is written to the underlying stream]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FSOutputSummer -->
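+ <!-- Editor's sketch (illustrative, not jdiff output): a hypothetical
+      FSOutputSummer subclass that prints each checksummed chunk instead of
+      writing it, assuming writeChunk is the only abstract member, as this
+      listing suggests.
+
+      import java.io.IOException;
+      import java.util.zip.CRC32;
+      import org.apache.hadoop.fs.FSInputChecker;
+      import org.apache.hadoop.fs.FSOutputSummer;
+
+      public class PrintingSummer extends FSOutputSummer {
+        public PrintingSummer() {
+          super(new CRC32(), 512, 4);  // 512-byte chunks, 4-byte checksums
+        }
+        protected void writeChunk(byte[] b, int off, int len, byte[] checksum)
+            throws IOException {
+          System.out.println(len + "-byte chunk, crc="
+              + FSInputChecker.checksum2long(checksum));
+        }
+      }
+ -->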
+ <!-- start class org.apache.hadoop.fs.FsShell -->
+ <class name="FsShell" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="FsShell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsShell" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the Trash object associated with this shell.]]>
+ </doc>
+ </method>
+ <method name="byteDesc" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Consider using {@link org.apache.hadoop.util.StringUtils#byteDesc} instead.">
+ <param name="len" type="long"/>
+ <doc>
+ <![CDATA[Return an abbreviated English-language description of the byte length
+ @deprecated Consider using {@link org.apache.hadoop.util.StringUtils#byteDesc} instead.]]>
+ </doc>
+ </method>
+ <method name="limitDecimalTo2" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="Consider using {@link org.apache.hadoop.util.StringUtils#limitDecimalTo2} instead.">
+ <param name="d" type="double"/>
+ <doc>
+ <![CDATA[@deprecated Consider using {@link org.apache.hadoop.util.StringUtils#limitDecimalTo2} instead.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[run]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[main() has some simple utility methods]]>
+ </doc>
+ </method>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="dateForm" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="modifFmt" type="java.text.SimpleDateFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provide command line access to a FileSystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsShell -->
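+ <!-- Editor's sketch (illustrative, not jdiff output): because FsShell
+      implements Tool, it can be driven programmatically through ToolRunner
+      rather than the hadoop launcher script.
+
+      import org.apache.hadoop.fs.FsShell;
+      import org.apache.hadoop.util.ToolRunner;
+
+      public class ShellDemo {
+        public static void main(String[] args) throws Exception {
+          // equivalent to: hadoop fs -ls /
+          int rc = ToolRunner.run(new FsShell(), new String[] {"-ls", "/"});
+          System.exit(rc);
+        }
+      }
+ -->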
+ <!-- start class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
+ <class name="FsUrlStreamHandlerFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.net.URLStreamHandlerFactory"/>
+ <constructor name="FsUrlStreamHandlerFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FsUrlStreamHandlerFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createURLStreamHandler" return="java.net.URLStreamHandler"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ </method>
+ <doc>
+ <![CDATA[Factory for URL stream handlers.
+
+ There is only one handler whose job is to create UrlConnections. A
+ FsUrlConnection relies on FileSystem to choose the appropriate FS
+ implementation.
+
+ Before returning our handler, we make sure that FileSystem knows an
+ implementation for the requested scheme/protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.FsUrlStreamHandlerFactory -->
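+ <!-- Editor's sketch (illustrative, not jdiff output): installing the
+      factory so java.net.URL understands FileSystem schemes. The host and
+      port are made up; note that the JDK accepts a stream handler factory
+      at most once per JVM.
+
+      import java.io.InputStream;
+      import java.net.URL;
+      import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
+
+      public class UrlDemo {
+        public static void main(String[] args) throws Exception {
+          URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
+          InputStream in = new URL("hdfs://namenode:8020/tmp/file.txt").openStream();
+          in.close();
+        }
+      }
+ -->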
+ <!-- start class org.apache.hadoop.fs.HarFileSystem -->
+ <class name="HarFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HarFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Public constructor of HarFileSystem.]]>
+ </doc>
+ </constructor>
+ <constructor name="HarFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor to create a HarFileSystem with an
+ underlying filesystem.
+ @param fs]]>
+ </doc>
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initialize a Har filesystem per har archive. The
+ archive home directory is the top level directory
+ in the filesystem that contains the HAR archive.
+ Be careful with this method: you do not want to keep
+ creating new FileSystem instances per call to
+ path.getFileSystem().
+ The uri of a Har filesystem is either
+ har://underlyingfsscheme-host:port/archivepath
+ or
+ har:///archivepath, which assumes the default underlying
+ filesystem when none is specified.]]>
+ </doc>
+ </method>
+ <method name="getHarVersion" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the top level archive.]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the uri of this filesystem.
+ The uri is of the form
+ har://underlyingfsscheme-host:port/pathintheunderlyingfs]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[get block locations from the underlying fs
+ @param file the input filestatus to get block locations
+ @param start the start in the file
+ @param len the length in the file
+ @return block locations for this segment of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getHarHash" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[the hash of the path p inside
+ the filesystem
+ @param p the path in the harfilesystem
+ @return the hash code of the path.]]>
+ </doc>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[return the filestatus of files in the har archive.
+ The permissions returned are those of the archive
+ index files; they are not persisted
+ while creating a hadoop archive.
+ @param f the path in har filesystem
+ @return filestatus.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns a har input stream which fakes end of
+ file. It reads the index files to get the part
+ file name and the size and start of the file.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[liststatus returns the children of a directory
+ after looking up the index files.]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the top level archive path.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[copies the file in the har filesystem to a local file.]]>
+ </doc>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[not implemented.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permisssion" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Not implemented.]]>
+ </doc>
+ </method>
+ <field name="VERSION" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is an implementation of the Hadoop Archive
+ Filesystem. This archive Filesystem has index files
+ of the form _index* and has contents of the form
+ part-*. The index files store the indexes of the
+ real files. The index files are of the form _masterindex
+ and _index. The master index is a level of indirection
+ into the index file to make the lookups faster. The index
+ file is sorted by the hash code of the paths that it contains,
+ and the master index contains pointers to the positions in
+ the index for ranges of hashcodes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.HarFileSystem -->
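+ <!-- Editor's sketch (illustrative, not jdiff output): opening an entry
+      inside a Hadoop archive through a har:// URI. The archive path and
+      entry name are made up; note the initialize() warning above about
+      creating new instances per call to path.getFileSystem().
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FSDataInputStream;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class HarDemo {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          Path p = new Path("har://hdfs-namenode:8020/user/me/logs.har/part-00000");
+          FileSystem harFs = p.getFileSystem(conf);
+          FSDataInputStream in = harFs.open(p);
+          in.close();
+        }
+      }
+ -->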
+ <!-- start class org.apache.hadoop.fs.InMemoryFileSystem -->
+ <class name="InMemoryFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InMemoryFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InMemoryFileSystem" type="java.net.URI, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reserveSpaceWithCheckSum" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Register a file with its size. This will also register a checksum for the
+ file that the user is trying to create. This is required since none of
+ the FileSystem APIs accept the size of the file as argument. But since it
+ is required for us to know a priori the size of the file we are going to
+ create, the user must call this method for each file he wants to create
+ and reserve memory for that file. We either succeed in reserving memory
+ for both the main file and the checksum file and return true, or return
+ false.]]>
+ </doc>
+ </method>
+ <method name="getFiles" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getNumFiles" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+ </method>
+ <method name="getFSSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPercentUsed" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[An implementation of the in-memory filesystem. This implementation assumes
+ that the file lengths are known ahead of time and the total length of all
+ the files is below a certain number (like 100 MB, configurable). Use the API
+ reserveSpaceWithCheckSum(Path f, int size) (see its description above) for
+ reserving space in the FS. The uri of this filesystem starts with
+ ramfs://.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.InMemoryFileSystem -->
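+ <!-- Editor's sketch (illustrative, not jdiff output): space must be
+      reserved before creating a file, per the class description above. The
+      ramfs authority and path are made up, and the cast assumes the ramfs
+      scheme is mapped to InMemoryFileSystem in the Configuration.
+
+      import java.net.URI;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FSDataOutputStream;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.InMemoryFileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class RamFsDemo {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          InMemoryFileSystem ramFs =
+              (InMemoryFileSystem) FileSystem.get(URI.create("ramfs://test"), conf);
+          Path f = new Path("/scratch/block0");
+          if (ramFs.reserveSpaceWithCheckSum(f, 1024L)) {  // file + checksum
+            FSDataOutputStream out = ramFs.create(f);
+            out.write(new byte[1024]);
+            out.close();
+          }
+        }
+      }
+ -->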
+ <!-- start class org.apache.hadoop.fs.LocalDirAllocator -->
+ <class name="LocalDirAllocator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalDirAllocator" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an allocator object
+ @param contextCfgItemName]]>
+ </doc>
+ </constructor>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. This method should be used if the size of
+ the file is not known a priori. We go round-robin over the set of disks
+ (via the configured dirs) and return the first complete path where
+ we could create the parent directory of the passed path.
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS. Pass size as -1 if not known a priori. We
+ round-robin over the set of disks (via the configured dirs) and return
+ the first complete path which has enough space
+ @param pathStr the requested path (this will be created on the first
+ available disk)
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocalPathToRead" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a path from the local FS for reading. We search through all the
+ configured dirs for the file's existence and return the complete
+ path to the file when we find one
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return the complete path to the file on a local disk
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createTmpFileForWrite" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="size" type="long"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates a temporary file in the local FS. Pass size as -1 if not known
+ a priori. We round-robin over the set of disks (via the configured dirs)
+ and select the first complete path which has enough space. A file is
+ created on this directory. The file is guaranteed to go away when the
+ JVM exits.
+ @param pathStr prefix for the temporary file
+ @param size the size of the file that is going to be written
+ @param conf the Configuration object
+ @return a unique temporary file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isContextValid" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextCfgItemName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Method to check whether a context is valid
+ @param contextCfgItemName
+ @return true/false]]>
+ </doc>
+ </method>
+ <method name="ifExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathStr" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[We search through all the configured dirs for the file's existence
+ and return true when we find one.
+ @param pathStr the requested file (this will be searched)
+ @param conf the Configuration object
+ @return true if the file exists, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of a round-robin scheme for disk allocation for creating
+ files. The way it works is that it keeps track of which disk was last
+ allocated for a file write. For the current request, the next disk from
+ the set of disks would be allocated if the free space on the disk is
+ sufficient to accommodate the file that is being considered for
+ creation. If the space requirements cannot be met, the next disk in order
+ would be tried and so on till a disk is found with sufficient capacity.
+ Once a disk with sufficient space is identified, a check is done to make
+ sure that the disk is writable. Also, there is an API provided that doesn't
+ take the space requirements into consideration but just checks whether the
+ disk under consideration is writable (this should be used for cases where
+ the file size is not known a priori). An API is provided to read a path that
+ was created earlier. That API works by doing a scan of all the disks for the
+ input pathname.
+ This implementation also provides the functionality of having multiple
+ allocators per JVM (one for each unique functionality or context, like
+ mapred, dfs-client, etc.). It ensures that there is only one instance of
+ an allocator per context per JVM.
+ Note:
+ 1. The contexts referred above are actually the configuration items defined
+ in the Configuration class like "mapred.local.dir" (for which we want to
+ control the dir allocations). The context-strings are exactly those
+ configuration items.
+ 2. This implementation does not take into consideration cases where
+ a disk becomes read-only or goes out of space while a file is being written
+ to (disks are shared between multiple processes, and so the latter situation
+ is probable).
+ 3. In the class implementation, "Disk" is referred to as "Dir", which
+ actually points to the configured directory on the Disk which will be the
+ parent for all file write/read allocations.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalDirAllocator -->
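+ <!-- Editor's sketch (illustrative, not jdiff output): allocating a local
+      path with a known size. The context string is a configuration key
+      naming the local dirs, per note 1 above; the file name is made up.
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.LocalDirAllocator;
+      import org.apache.hadoop.fs.Path;
+
+      public class AllocDemo {
+        public static void main(String[] args) throws Exception {
+          Configuration conf = new Configuration();
+          LocalDirAllocator alloc = new LocalDirAllocator("mapred.local.dir");
+          Path spill = alloc.getLocalPathForWrite("spill0.out", 1L << 20, conf);
+          System.out.println("writing ~1 MB to " + spill);
+        }
+      }
+ -->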
+ <!-- start class org.apache.hadoop.fs.LocalFileSystem -->
+ <class name="LocalFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LocalFileSystem" type="org.apache.hadoop.fs.FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRaw" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reportChecksumFailure" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="inPos" type="long"/>
+ <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+ <param name="sumsPos" type="long"/>
+ <doc>
+ <![CDATA[Moves files to a bad file directory on the same device, so that their
+ storage will not be reused.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the checksummed local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.LocalFileSystem -->
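+ <!-- Editor's sketch (illustrative, not jdiff output): mapping a Path back
+      to a java.io.File and fetching the raw, checksum-free filesystem.
+      The path is made up.
+
+      import java.io.File;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.fs.LocalFileSystem;
+      import org.apache.hadoop.fs.Path;
+
+      public class LocalDemo {
+        public static void main(String[] args) throws Exception {
+          LocalFileSystem lfs = FileSystem.getLocal(new Configuration());
+          File f = lfs.pathToFile(new Path("/tmp/example.txt"));
+          FileSystem raw = lfs.getRaw();  // underlying FS, no checksum handling
+          System.out.println(f.getAbsolutePath() + " via " + raw.getUri());
+        }
+      }
+ -->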
+ <!-- start class org.apache.hadoop.fs.MD5MD5CRC32FileChecksum -->
+ <class name="MD5MD5CRC32FileChecksum" extends="org.apache.hadoop.fs.FileChecksum"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5MD5CRC32FileChecksum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Same as this(0, 0, null)]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5MD5CRC32FileChecksum" type="int, long, org.apache.hadoop.io.MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an MD5FileChecksum]]>
+ </doc>
+ </constructor>
+ <method name="getAlgorithmName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="xml" type="org.znerd.xmlenc.XMLOutputter"/>
+ <param name="that" type="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write that object to xml output.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attrs" type="org.xml.sax.Attributes"/>
+ <exception name="SAXException" type="org.xml.sax.SAXException"/>
+ <doc>
+ <![CDATA[Return the object represented in the attributes.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <field name="LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[MD5 of MD5 of CRC32.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.MD5MD5CRC32FileChecksum -->
+ <!-- start class org.apache.hadoop.fs.Path -->
+ <class name="Path" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="Path" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resolve a child path against a parent path.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a path from a String. Path strings are URIs, but with
+ unescaped elements and some additional normalization.]]>
+ </doc>
+ </constructor>
+ <constructor name="Path" type="java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a Path from components.]]>
+ </doc>
+ </constructor>
+ <method name="toUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this to a URI.]]>
+ </doc>
+ </method>
+ <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the FileSystem that owns this Path.]]>
+ </doc>
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[True if this path is absolute.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the final component of this path.]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the parent of a path or null if at root.]]>
+ </doc>
+ </method>
+ <method name="suffix" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a suffix to the final name in the path.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="depth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the number of elements in this path.]]>
+ </doc>
+ </method>
+ <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <doc>
+ <![CDATA[Returns a qualified path object.]]>
+ </doc>
+ </method>
+ <field name="SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The directory separator, a slash.]]>
+ </doc>
+ </field>
+ <field name="SEPARATOR_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CUR_DIR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Names a file or directory in a {@link FileSystem}.
+ Path strings use slash as the directory separator. A path string is
+ absolute if it begins with a slash.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Path -->
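+  <!-- Usage sketch: resolving a child Path against a parent with the API
+       above. A minimal example; the directory and file names are hypothetical.
+
+       import org.apache.hadoop.fs.Path;
+
+       public class PathDemo {
+         public static void main(String[] args) {
+           Path parent = new Path("/user/hadoop");
+           Path child = new Path(parent, "input.txt"); // /user/hadoop/input.txt
+           System.out.println(child.getName());        // input.txt
+           System.out.println(child.getParent());      // /user/hadoop
+           System.out.println(child.depth());          // 3
+           System.out.println(child.isAbsolute());     // true
+           System.out.println(child.suffix(".bak"));   // /user/hadoop/input.txt.bak
+         }
+       }
+  -->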
+ <!-- start interface org.apache.hadoop.fs.PathFilter -->
+ <interface name="PathFilter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Tests whether or not the specified abstract pathname should be
+ included in a pathname list.
+
+ @param path The abstract pathname to be tested
+      @return <code>true</code> if and only if <code>path</code>
+ should be included]]>
+ </doc>
+ </method>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PathFilter -->
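+  <!-- Usage sketch: an anonymous PathFilter that keeps only ".log" entries,
+       passed to FileSystem.listStatus. The /var/app directory is hypothetical
+       and the default filesystem from the Configuration is assumed.
+
+       import java.io.IOException;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileStatus;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.fs.PathFilter;
+
+       public class LogFilterDemo {
+         public static void main(String[] args) throws IOException {
+           FileSystem fs = FileSystem.get(new Configuration());
+           PathFilter logsOnly = new PathFilter() {
+             public boolean accept(Path path) {
+               return path.getName().endsWith(".log"); // test the final component
+             }
+           };
+           for (FileStatus st : fs.listStatus(new Path("/var/app"), logsOnly)) {
+             System.out.println(st.getPath());
+           }
+         }
+       }
+  -->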
+ <!-- start interface org.apache.hadoop.fs.PositionedReadable -->
+ <interface name="PositionedReadable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read up to the specified number of bytes, from a given
+ position within a file, and return the number of bytes read. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the specified number of bytes, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <param name="buffer" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read a number of bytes equal to the length of the buffer, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits positional reading.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.PositionedReadable -->
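+  <!-- Usage sketch: positional reads through FSDataInputStream, which
+       implements PositionedReadable. The file name is hypothetical and the
+       default filesystem is assumed.
+
+       import java.io.IOException;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FSDataInputStream;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class PreadDemo {
+         public static void main(String[] args) throws IOException {
+           FileSystem fs = FileSystem.get(new Configuration());
+           FSDataInputStream in = fs.open(new Path("/data/part-00000"));
+           byte[] buf = new byte[4096];
+           int n = in.read(1024L, buf, 0, buf.length); // read at offset 1024
+           in.readFully(0L, buf);  // fill the whole buffer from offset 0
+           in.close();             // neither call moves the stream's own offset
+           System.out.println(n + " bytes read at offset 1024");
+         }
+       }
+  -->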
+ <!-- start class org.apache.hadoop.fs.RawLocalFileSystem -->
+ <class name="RawLocalFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RawLocalFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pathToFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Convert a path to a File.]]>
+ </doc>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates the specified directory hierarchy. Does not
+ treat existence as an error.]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="moveFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsWorkingFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOwner"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chown to set owner.]]>
+ </doc>
+ </method>
+ <method name="setPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Use the command chmod to set permission.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implement the FileSystem API for the raw local filesystem.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.RawLocalFileSystem -->
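+  <!-- Usage sketch: driving the raw (non-checksumming) local filesystem
+       directly through the FileSystem API; the /tmp/demo path is hypothetical.
+
+       import java.io.IOException;
+       import java.net.URI;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.fs.RawLocalFileSystem;
+
+       public class RawLocalDemo {
+         public static void main(String[] args) throws IOException {
+           RawLocalFileSystem raw = new RawLocalFileSystem();
+           raw.initialize(URI.create("file:///"), new Configuration());
+           raw.mkdirs(new Path("/tmp/demo"));  // existence is not an error
+           System.out.println(raw.pathToFile(new Path("/tmp/demo")));
+           raw.close();
+         }
+       }
+  -->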
+ <!-- start interface org.apache.hadoop.fs.Seekable -->
+ <interface name="Seekable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location. Can't
+ seek past the end of the file.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current offset from the start of the file]]>
+ </doc>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Seek to a different copy of the data. Returns true if
+      a new source is found, false otherwise.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Stream that permits seeking.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Seekable -->
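+  <!-- Usage sketch: seeking within a stream; FSDataInputStream implements
+       Seekable. The file name is hypothetical and the default filesystem is
+       assumed.
+
+       import java.io.IOException;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FSDataInputStream;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class SeekDemo {
+         public static void main(String[] args) throws IOException {
+           FileSystem fs = FileSystem.get(new Configuration());
+           FSDataInputStream in = fs.open(new Path("/data/part-00000"));
+           in.seek(128L);                   // next read starts at offset 128
+           System.out.println(in.getPos()); // 128
+           in.close();
+         }
+       }
+  -->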
+ <!-- start interface org.apache.hadoop.fs.Syncable -->
+ <interface name="Syncable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Synchronize all buffers with the underlying devices.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[This interface declares the sync() operation.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.Syncable -->
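+  <!-- Usage sketch: forcing buffered output to the underlying devices;
+       FSDataOutputStream implements Syncable in this release. The path is
+       hypothetical and the default filesystem is assumed.
+
+       import java.io.IOException;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FSDataOutputStream;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class SyncDemo {
+         public static void main(String[] args) throws IOException {
+           FileSystem fs = FileSystem.get(new Configuration());
+           FSDataOutputStream out = fs.create(new Path("/logs/events"));
+           out.write("event-1\n".getBytes());
+           out.sync();  // synchronize buffers before continuing
+           out.close();
+         }
+       }
+  -->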
+ <!-- start class org.apache.hadoop.fs.Trash -->
+ <class name="Trash" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Trash" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a trash can accessor.
+ @param conf a Configuration]]>
+ </doc>
+ </constructor>
+ <constructor name="Trash" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a trash can accessor for the FileSystem provided.]]>
+ </doc>
+ </constructor>
+ <method name="moveToTrash" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move a file or directory to the current trash directory.
+ @return false if the item is already in the trash or trash is disabled]]>
+ </doc>
+ </method>
+ <method name="checkpoint"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a trash checkpoint.]]>
+ </doc>
+ </method>
+ <method name="expunge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete old checkpoints.]]>
+ </doc>
+ </method>
+ <method name="getEmptier" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return a {@link Runnable} that periodically empties the trash of all
+ users, intended to be run by the superuser. Only one checkpoint is kept
+ at a time.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Run an emptier.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provides a <i>trash</i> feature. Files are moved to a user's trash
+ directory, a subdirectory of their home directory named ".Trash". Files are
+ initially moved to a <i>current</i> sub-directory of the trash directory.
+ Within that sub-directory their original path is preserved. Periodically
+ one may checkpoint the current trash and remove older checkpoints. (This
+ design permits trash management without enumeration of the full trash
+ content, without date support in the filesystem, and without clock
+ synchronization.)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.Trash -->
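+  <!-- Usage sketch: trashing a file instead of deleting it outright; the
+       path is hypothetical and trash must be enabled in the Configuration
+       for moveToTrash to return true.
+
+       import java.io.IOException;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.fs.Trash;
+
+       public class TrashDemo {
+         public static void main(String[] args) throws IOException {
+           Trash trash = new Trash(new Configuration());
+           if (!trash.moveToTrash(new Path("/user/hadoop/old.dat"))) {
+             System.out.println("already trashed, or trash is disabled");
+           }
+           trash.checkpoint(); // snapshot the current trash
+           trash.expunge();    // delete old checkpoints
+         }
+       }
+  -->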
+</package>
+<package name="org.apache.hadoop.fs.ftp">
+ <!-- start class org.apache.hadoop.fs.ftp.FTPException -->
+ <class name="FTPException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FTPException" type="java.lang.String, java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+    <![CDATA[A class to wrap a {@link Throwable} into a RuntimeException.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPException -->
+ <!-- start class org.apache.hadoop.fs.ftp.FTPFileSystem -->
+ <class name="FTPFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A stream obtained via this call must be closed before using other APIs of
+ this class or else the invocation will block.]]>
+ </doc>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use delete(Path, boolean) instead">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Use delete(Path, boolean) instead]]>
+ </doc>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BUFFER_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_BLOCK_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} backed by an FTP client provided by <a
+ href="http://commons.apache.org/net/">Apache Commons Net</a>.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPFileSystem -->
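+  <!-- Usage sketch: obtaining the FTP-backed FileSystem by URI. The host,
+       credentials and listing path are placeholders, and mapping the ftp
+       scheme through the fs.ftp.impl property is an assumption about the
+       deployment's Configuration.
+
+       import java.io.IOException;
+       import java.net.URI;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileStatus;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class FtpDemo {
+         public static void main(String[] args) throws IOException {
+           Configuration conf = new Configuration();
+           conf.set("fs.ftp.impl", "org.apache.hadoop.fs.ftp.FTPFileSystem");
+           FileSystem ftp = FileSystem.get(URI.create("ftp://user:password@ftphost/"), conf);
+           for (FileStatus st : ftp.listStatus(new Path("/pub"))) {
+             System.out.println(st.getPath());
+           }
+           ftp.close();
+         }
+       }
+  -->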
+ <!-- start class org.apache.hadoop.fs.ftp.FTPInputStream -->
+ <class name="FTPInputStream" extends="org.apache.hadoop.fs.FSInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FTPInputStream" type="java.io.InputStream, org.apache.commons.net.ftp.FTPClient, org.apache.hadoop.fs.FileSystem.Statistics"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seekToNewSource" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="targetPos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readLimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.fs.ftp.FTPInputStream -->
+</package>
+<package name="org.apache.hadoop.fs.kfs">
+ <!-- start class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
+ <class name="KosmosFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="KosmosFileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultReplication" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setReplication" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="replication" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDefaultBlockSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="lock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="shared" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="release"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+ <param name="start" type="long"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return null if the file doesn't exist; otherwise, get the
+      locations of the various chunks of the file from KFS.]]>
+ </doc>
+ </method>
+ <method name="copyFromLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="copyToLocalFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="delSrc" type="boolean"/>
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="completeLocalOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A FileSystem backed by KFS.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.kfs.KosmosFileSystem -->
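+  <!-- Usage sketch: pointing FileSystem at a KFS metaserver. The host, port
+       and the fs.kfs.impl mapping are assumptions about the deployment.
+
+       import java.io.IOException;
+       import java.net.URI;
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.FileSystem;
+       import org.apache.hadoop.fs.Path;
+
+       public class KfsDemo {
+         public static void main(String[] args) throws IOException {
+           Configuration conf = new Configuration();
+           conf.set("fs.kfs.impl", "org.apache.hadoop.fs.kfs.KosmosFileSystem");
+           FileSystem kfs = FileSystem.get(URI.create("kfs://metaserver:20000/"), conf);
+           System.out.println(kfs.mkdirs(new Path("/demo")));
+           kfs.close();
+         }
+       }
+  -->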
+</package>
+<package name="org.apache.hadoop.fs.permission">
+ <!-- start class org.apache.hadoop.fs.permission.AccessControlException -->
+ <class name="AccessControlException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.security.AccessControlException}
+ instead.">
+ <constructor name="AccessControlException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor is needed for unwrapping from
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new exception with the specified cause and a detail
+ message of <tt>(cause==null ? null : cause.toString())</tt> (which
+ typically contains the class and detail message of <tt>cause</tt>).
+ @param cause the cause (which is saved for later retrieval by the
+ {@link #getCause()} method). (A <tt>null</tt> value is
+ permitted, and indicates that the cause is nonexistent or
+ unknown.)]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[An exception class for access control related issues.
+ @deprecated Use {@link org.apache.hadoop.security.AccessControlException}
+ instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.AccessControlException -->
+ <!-- start class org.apache.hadoop.fs.permission.FsAction -->
+ <class name="FsAction" extends="java.lang.Enum"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.fs.permission.FsAction[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="implies" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[Return true if this action implies that action.
+      @param that the action to check against]]>
+ </doc>
+ </method>
+ <method name="and" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[AND operation.]]>
+ </doc>
+ </method>
+ <method name="or" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/>
+ <doc>
+ <![CDATA[OR operation.]]>
+ </doc>
+ </method>
+ <method name="not" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[NOT operation.]]>
+ </doc>
+ </method>
+ <field name="NONE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="EXECUTE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WRITE_EXECUTE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_EXECUTE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READ_WRITE" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ALL" type="org.apache.hadoop.fs.permission.FsAction"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SYMBOL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Symbolic representation]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[File system actions, e.g. read, write, etc.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsAction -->
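+  <!-- Usage sketch: combining and testing FsAction values with the and, or,
+       not and implies operations described above.
+
+       import org.apache.hadoop.fs.permission.FsAction;
+
+       public class FsActionDemo {
+         public static void main(String[] args) {
+           FsAction rw = FsAction.READ.or(FsAction.WRITE);             // READ_WRITE
+           System.out.println(rw.implies(FsAction.READ));              // true
+           System.out.println(FsAction.ALL.and(FsAction.WRITE.not())); // READ_EXECUTE
+         }
+       }
+  -->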
+ <!-- start class org.apache.hadoop.fs.permission.FsPermission -->
+ <class name="FsPermission" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct by the given {@link FsAction}.
+ @param u user action
+ @param g group action
+ @param o other action]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="short"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct by the given mode.
+ @param mode
+ @see #toShort()]]>
+ </doc>
+ </constructor>
+ <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor
+
+ @param other other permission]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="permission" type="short"/>
+ <doc>
+ <![CDATA[Create an immutable {@link FsPermission} object.]]>
+ </doc>
+ </method>
+ <method name="getUserAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getGroupAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="getOtherAction" return="org.apache.hadoop.fs.permission.FsAction"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return other {@link FsAction}.]]>
+ </doc>
+ </method>
+ <method name="fromShort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="short"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link FsPermission} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="toShort" return="short"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Encode the object to a short.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply a umask to this permission and return a new one]]>
+ </doc>
+ </method>
+ <method name="getUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="setUMask"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Set the user file creation mask (umask)]]>
+ </doc>
+ </method>
+ <method name="getDefault" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default permission.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="unixSymbolicPermission" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Create an FsPermission from a Unix symbolic permission string
+ @param unixSymbolicPermission e.g. "-rw-rw-rw-"]]>
+ </doc>
+ </method>
+ <field name="UMASK_LABEL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[umask property label]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_UMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A class for file/directory permissions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.FsPermission -->
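+  <!-- Usage sketch: three equivalent ways to spell mode 0666, plus applying
+       a umask. The mode values are chosen for illustration only.
+
+       import org.apache.hadoop.fs.permission.FsAction;
+       import org.apache.hadoop.fs.permission.FsPermission;
+
+       public class FsPermissionDemo {
+         public static void main(String[] args) {
+           FsPermission a = new FsPermission(
+               FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE);
+           FsPermission b = new FsPermission((short) 0666);
+           FsPermission c = FsPermission.valueOf("-rw-rw-rw-");
+           System.out.println(a.equals(b) && b.equals(c));                  // true
+           System.out.println(b.applyUMask(new FsPermission((short) 022))); // 0644
+         }
+       }
+  -->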
+ <!-- start class org.apache.hadoop.fs.permission.PermissionStatus -->
+ <class name="PermissionStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="PermissionStatus" type="java.lang.String, java.lang.String, org.apache.hadoop.fs.permission.FsPermission"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <param name="group" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Create an immutable {@link PermissionStatus} object.]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return user name]]>
+ </doc>
+ </method>
+ <method name="getGroupName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return group name]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return permission]]>
+ </doc>
+ </method>
+ <method name="applyUMask" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <doc>
+ <![CDATA[Apply umask.
+ @see FsPermission#applyUMask(FsPermission)]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.fs.permission.PermissionStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and initialize a {@link PermissionStatus} from {@link DataInput}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="username" type="java.lang.String"/>
+ <param name="groupname" type="java.lang.String"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a {@link PermissionStatus} from its base components.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Store permission related information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.permission.PermissionStatus -->
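+ <!-- A round-trip sketch for PermissionStatus over its Writable protocol; the
+ user and group names are placeholders. All calls follow the signatures
+ recorded above.
+
+ import java.io.ByteArrayInputStream;
+ import java.io.ByteArrayOutputStream;
+ import java.io.DataInputStream;
+ import java.io.DataOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.fs.permission.FsPermission;
+ import org.apache.hadoop.fs.permission.PermissionStatus;
+
+ public class PermissionStatusSketch {
+ public static void main(String[] args) throws IOException {
+ PermissionStatus status = PermissionStatus.createImmutable(
+ "alice", "users", FsPermission.valueOf("-rw-rw-rw-"));
+ ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+ status.write(new DataOutputStream(bytes)); // serialize
+ PermissionStatus copy = PermissionStatus.read(new DataInputStream(
+ new ByteArrayInputStream(bytes.toByteArray()))); // deserialize
+ System.out.println(copy.getUserName() + ":" + copy.getGroupName());
+ }
+ }
+ -->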
+</package>
+<package name="org.apache.hadoop.fs.s3">
+ <!-- start class org.apache.hadoop.fs.s3.Block -->
+ <class name="Block" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Block" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Holds metadata about a block of data being stored in a {@link FileSystemStore}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.Block -->
+ <!-- start interface org.apache.hadoop.fs.s3.FileSystemStore -->
+ <interface name="FileSystemStore" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inode" type="org.apache.hadoop.fs.s3.INode"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="storeBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="file" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="inodeExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="blockExists" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="blockId" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveINode" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="retrieveBlock" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <param name="byteRangeStart" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteINode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteBlock"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="block" type="org.apache.hadoop.fs.s3.Block"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listSubPaths" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listDeepSubPaths" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="purge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete everything. Used for testing.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="dump"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Diagnostic method to dump all INodes to the console.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for storing and retrieving {@link INode}s and {@link Block}s.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.fs.s3.FileSystemStore -->
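+ <!-- A sketch of the call pattern this interface defines, written against the
+ interface only; a concrete store implementation is assumed to be supplied by
+ the caller.
+
+ import java.io.File;
+ import java.io.IOException;
+ import java.net.URI;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.s3.Block;
+ import org.apache.hadoop.fs.s3.FileSystemStore;
+ import org.apache.hadoop.fs.s3.INode;
+
+ public class StoreSketch {
+ static void store(FileSystemStore store, URI uri, Configuration conf,
+ Path path, INode inode, Block block, File data) throws IOException {
+ store.initialize(uri, conf);
+ store.storeBlock(block, data); // upload the block payload first
+ store.storeINode(path, inode); // then record the metadata that points at it
+ }
+ }
+ -->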
+ <!-- start class org.apache.hadoop.fs.s3.INode -->
+ <class name="INode" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="INode" type="org.apache.hadoop.fs.s3.INode.FileType, org.apache.hadoop.fs.s3.Block[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBlocks" return="org.apache.hadoop.fs.s3.Block[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getFileType" return="org.apache.hadoop.fs.s3.INode.FileType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isDirectory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSerializedLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="serialize" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deserialize" return="org.apache.hadoop.fs.s3.INode"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="FILE_TYPES" type="org.apache.hadoop.fs.s3.INode.FileType[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DIRECTORY_INODE" type="org.apache.hadoop.fs.s3.INode"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Holds file metadata including type (regular file, or directory),
+ and the list of blocks that are pointers to the data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.INode -->
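+ <!-- A serialization round trip for INode. FileType.FILE is assumed to be one
+ of the enum constants behind the FILE_TYPES field; the block id and length
+ are arbitrary.
+
+ import java.io.InputStream;
+ import org.apache.hadoop.fs.s3.Block;
+ import org.apache.hadoop.fs.s3.INode;
+
+ public class INodeSketch {
+ public static void main(String[] args) throws Exception {
+ INode inode = new INode(INode.FileType.FILE,
+ new Block[] { new Block(1L, 4096L) });
+ InputStream in = inode.serialize();
+ INode copy = INode.deserialize(in);
+ System.out.println(copy.isFile() + " blocks=" + copy.getBlocks().length);
+ }
+ }
+ -->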
+ <!-- start class org.apache.hadoop.fs.s3.MigrationTool -->
+ <class name="MigrationTool" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="MigrationTool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ This class is a tool for migrating data from an older to a newer version
+ of an S3 filesystem.
+ </p>
+ <p>
+ All files in the filesystem are migrated by re-writing the block metadata
+ - no datafiles are touched.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.MigrationTool -->
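+ <!-- A sketch of driving MigrationTool programmatically. Since it implements
+ Tool, ToolRunner.run feeds generic command-line options into the
+ Configuration before run(args) is invoked; the argument vector is whatever
+ the tool expects on its command line.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.s3.MigrationTool;
+ import org.apache.hadoop.util.ToolRunner;
+
+ public class MigrateSketch {
+ public static void main(String[] args) throws Exception {
+ int rc = ToolRunner.run(new Configuration(), new MigrationTool(), args);
+ System.exit(rc);
+ }
+ }
+ -->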
+ <!-- start class org.apache.hadoop.fs.s3.S3Credentials -->
+ <class name="S3Credentials" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Credentials"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@throws IllegalArgumentException if credentials for S3 cannot be
+ determined.]]>
+ </doc>
+ </method>
+ <method name="getAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSecretAccessKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Extracts AWS credentials from the filesystem URI or configuration.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Credentials -->
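+ <!-- A sketch of pulling credentials out of a filesystem URI; the key and
+ secret embedded in the URI are placeholders, and initialize throws
+ IllegalArgumentException when no credentials can be determined.
+
+ import java.net.URI;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.s3.S3Credentials;
+
+ public class CredentialsSketch {
+ public static void main(String[] args) throws Exception {
+ S3Credentials creds = new S3Credentials();
+ creds.initialize(new URI("s3://ACCESSKEY:SECRET@mybucket/"),
+ new Configuration());
+ System.out.println("access key = " + creds.getAccessKey());
+ }
+ }
+ -->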
+ <!-- start class org.apache.hadoop.fs.s3.S3Exception -->
+ <class name="S3Exception" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3Exception" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown if there is a problem communicating with Amazon S3.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3Exception -->
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystem -->
+ <class name="S3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="S3FileSystem" type="org.apache.hadoop.fs.s3.FileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="isFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param permission Currently ignored.]]>
+ </doc>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[FileStatus for S3 file systems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A block-based {@link FileSystem} backed by
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ </p>
+ @see NativeS3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystem -->
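+ <!-- A hedged sketch of reaching the block-based S3 filesystem through the
+ generic FileSystem API; the bucket name is a placeholder and credentials are
+ assumed to be configured (see S3Credentials above).
+
+ import java.net.URI;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+
+ public class S3FsSketch {
+ public static void main(String[] args) throws Exception {
+ Configuration conf = new Configuration();
+ // An s3:// URI selects the block-based S3FileSystem implementation.
+ FileSystem fs = FileSystem.get(new URI("s3://mybucket/"), conf);
+ fs.mkdirs(new Path("/data")); // permission is currently ignored here
+ System.out.println(fs.getWorkingDirectory());
+ }
+ }
+ -->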
+ <!-- start class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <class name="S3FileSystemException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="S3FileSystemException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when there is a fatal exception while using {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.S3FileSystemException -->
+ <!-- start class org.apache.hadoop.fs.s3.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="org.apache.hadoop.fs.s3.S3FileSystemException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Thrown when Hadoop cannot read the version of the data stored
+ in {@link S3FileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3.VersionMismatchException -->
+</package>
+<package name="org.apache.hadoop.fs.s3native">
+ <!-- start class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
+ <class name="NativeS3FileSystem" extends="org.apache.hadoop.fs.FileSystem"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeS3FileSystem"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="NativeS3FileSystem" type="org.apache.hadoop.fs.s3native.NativeFileSystemStore"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.net.URI"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This optional operation is not yet supported.]]>
+ </doc>
+ </method>
+ <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <param name="overwrite" type="boolean"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="delete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="recursive" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getUri" return="java.net.URI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ If <code>f</code> is a file, this method will make a single call to S3.
+ If <code>f</code> is a directory, this method will make a maximum of
+ (<i>n</i> / 1000) + 2 calls to S3, where <i>n</i> is the total number of
+ files and directories contained directly in <code>f</code>.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="mkdirs" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="rename" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="org.apache.hadoop.fs.Path"/>
+ <param name="dst" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the working directory to the given directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A {@link FileSystem} for reading and writing files stored on
+ <a href="http://aws.amazon.com/s3">Amazon S3</a>.
+ Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
+ stores files on S3 in their
+ native form so they can be read by other S3 tools.
+ </p>
+ @see org.apache.hadoop.fs.s3.S3FileSystem]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.s3native.NativeS3FileSystem -->
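+ <!-- A listing sketch for the native-form store; the s3n:// scheme is the
+ conventional binding for this class, the bucket name is a placeholder, and
+ credentials are assumed to be configured.
+
+ import java.net.URI;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+
+ public class NativeS3Sketch {
+ public static void main(String[] args) throws Exception {
+ FileSystem fs = FileSystem.get(new URI("s3n://mybucket/"),
+ new Configuration());
+ // Per the listStatus note above, a directory with n direct children
+ // costs at most (n / 1000) + 2 calls to S3.
+ for (FileStatus status : fs.listStatus(new Path("/logs"))) {
+ System.out.println(status.getPath());
+ }
+ }
+ }
+ -->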
+</package>
+<package name="org.apache.hadoop.fs.shell">
+ <!-- start class org.apache.hadoop.fs.shell.Command -->
+ <class name="Command" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Command" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the command's name, excluding its leading '-' character]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the command on the input path
+
+ @param path the input path
+ @throws IOException if any error occurs]]>
+ </doc>
+ </method>
+ <method name="runAll" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[For each source path, execute the command
+
+ @return 0 if it runs successfully; -1 if it fails]]>
+ </doc>
+ </method>
+ <field name="args" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract class for the execution of a file system command]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Command -->
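+ <!-- A sketch of the subclassing contract: getCommandName supplies the name
+ without its leading '-', and runAll() invokes run(Path) once per source path
+ held in the protected args field. The "pathcheck" command is invented for
+ illustration.
+
+ import java.io.IOException;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.shell.Command;
+
+ public class PathCheck extends Command {
+ public PathCheck(Configuration conf, String[] sourcePaths) {
+ super(conf);
+ args = sourcePaths; // protected field consumed by runAll()
+ }
+ public String getCommandName() { return "pathcheck"; }
+ protected void run(Path path) throws IOException {
+ System.out.println("checking " + path);
+ }
+ }
+ -->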
+ <!-- start class org.apache.hadoop.fs.shell.CommandFormat -->
+ <class name="CommandFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CommandFormat" type="java.lang.String, int, int, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor]]>
+ </doc>
+ </constructor>
+ <method name="parse" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="pos" type="int"/>
+ <doc>
+ <![CDATA[Parse parameters starting from the given position
+
+ @param args an array of input arguments
+ @param pos the position at which to start parsing
+ @return a list of parameters]]>
+ </doc>
+ </method>
+ <method name="getOpt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="option" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return whether the option is set
+
+ @param option String representation of an option
+ @return true if the option is set; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Parse the args of a command and check the format of args.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.CommandFormat -->
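+ <!-- An option-parsing sketch. The constructor is assumed to take the command
+ name, the minimum and maximum parameter counts, and the accepted option
+ letters, matching its (String, int, int, String[]) signature above.
+
+ import java.util.List;
+ import org.apache.hadoop.fs.shell.CommandFormat;
+
+ public class ParseSketch {
+ public static void main(String[] args) {
+ CommandFormat fmt = new CommandFormat("count", 1, Integer.MAX_VALUE, "q");
+ String[] argv = { "-count", "-q", "/user" };
+ List params = fmt.parse(argv, 1); // skip the command token itself
+ System.out.println(params + " q=" + fmt.getOpt("q"));
+ }
+ }
+ -->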
+ <!-- start class org.apache.hadoop.fs.shell.Count -->
+ <class name="Count" extends="org.apache.hadoop.fs.shell.Command"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Count" type="java.lang.String[], int, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+
+ @param cmd the count command
+ @param pos the starting index of the arguments
+ @param conf the configuration]]>
+ </doc>
+ </constructor>
+ <method name="matches" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Check if a command is the count command
+
+ @param cmd A string representation of a command starting with "-"
+ @return true if this is a count command; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getCommandName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USAGE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DESCRIPTION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Count the number of directories, files, bytes, quota, and remaining quota.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.fs.shell.Count -->
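+ <!-- A dispatch sketch in the style of a shell front end; the argument vector
+ is illustrative.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.shell.Count;
+
+ public class CountSketch {
+ public static void main(String[] args) throws Exception {
+ String[] argv = { "-count", "-q", "/user" };
+ if (Count.matches(argv[0])) {
+ // Command arguments start after the command token itself.
+ int rc = new Count(argv, 1, new Configuration()).runAll();
+ System.exit(rc);
+ }
+ }
+ }
+ -->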
+</package>
+<package name="org.apache.hadoop.http">
+ <!-- start interface org.apache.hadoop.http.FilterContainer -->
+ <interface name="FilterContainer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="addFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map"/>
+ <doc>
+ <![CDATA[Add a filter to the container.
+ @param name Filter name
+ @param classname Filter class name
+ @param parameters a map from parameter names to initial values]]>
+ </doc>
+ </method>
+ <method name="addGlobalFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map"/>
+ <doc>
+ <![CDATA[Add a global filter to the container.
+ @param name filter name
+ @param classname filter class name
+ @param parameters a map from parameter names to initial values]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A container class for javax.servlet.Filter.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.http.FilterContainer -->
+ <!-- start class org.apache.hadoop.http.FilterInitializer -->
+ <class name="FilterInitializer" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FilterInitializer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Initialize a javax.servlet.Filter.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.FilterInitializer -->
+ <!-- start class org.apache.hadoop.http.HttpServer -->
+ <class name="HttpServer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.http.FilterContainer"/>
+ <constructor name="HttpServer" type="java.lang.String, java.lang.String, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as this(name, bindAddress, port, findPort, null);]]>
+ </doc>
+ </constructor>
+ <constructor name="HttpServer" type="java.lang.String, java.lang.String, int, boolean, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a status server on the given port.
+ The jsp scripts are taken from src/webapps/<name>.
+ @param name The name of the server
+ @param bindAddress The address to bind the server to
+ @param port The port to use on the server
+ @param findPort whether the server should start at the given port and
+ increment by 1 until it finds a free port.
+ @param conf Configuration]]>
+ </doc>
+ </constructor>
+ <method name="createBaseListener" return="org.mortbay.jetty.Connector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a required listener for the Jetty instance listening on the port
+ provided. This wrapper and all subclasses must create at least one
+ listener.]]>
+ </doc>
+ </method>
+ <method name="addDefaultApps"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="parent" type="org.mortbay.jetty.handler.ContextHandlerCollection"/>
+ <param name="appDir" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add default apps.
+ @param appDir The application directory
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="addDefaultServlets"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Add default servlets.]]>
+ </doc>
+ </method>
+ <method name="addContext"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ctxt" type="org.mortbay.jetty.servlet.Context"/>
+ <param name="isFiltered" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="addContext"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="dir" type="java.lang.String"/>
+ <param name="isFiltered" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a context
+ @param pathSpec The path spec for the context
+ @param dir The directory containing the context
+ @param isFiltered if true, the servlet is added to the filter path mapping
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Set a value in the webapp context. These values are available to the jsp
+ pages as "application.getAttribute(name)".
+ @param name The name of the attribute
+ @param value The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="addServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param clazz The servlet class]]>
+ </doc>
+ </method>
+ <method name="addInternalServlet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="this is a temporary method">
+ <param name="name" type="java.lang.String"/>
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add an internal servlet in the server.
+ @param name The name of the servlet (can be passed as null)
+ @param pathSpec The path spec for the servlet
+ @param clazz The servlet class
+ @deprecated this is a temporary method]]>
+ </doc>
+ </method>
+ <method name="addFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="addGlobalFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="defineFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="ctx" type="org.mortbay.jetty.servlet.Context"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="classname" type="java.lang.String"/>
+ <param name="parameters" type="java.util.Map"/>
+ <param name="urls" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Define a filter for a context and set up default url mappings.]]>
+ </doc>
+ </method>
+ <method name="addFilterPathMapping"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pathSpec" type="java.lang.String"/>
+ <param name="webAppCtx" type="org.mortbay.jetty.servlet.Context"/>
+ <doc>
+ <![CDATA[Add the path spec to the filter path mapping.
+ @param pathSpec The path spec
+ @param webAppCtx The WebApplicationContext to add to]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the value in the webapp context.
+ @param name The name of the attribute
+ @return The value of the attribute]]>
+ </doc>
+ </method>
+ <method name="getWebAppsPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the pathname to the webapps files.
+ @return the pathname as a URL
+ @throws IOException if 'webapps' directory cannot be found on CLASSPATH.]]>
+ </doc>
+ </method>
+ <method name="getPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the port that the server is on
+ @return the port]]>
+ </doc>
+ </method>
+ <method name="setThreads"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="min" type="int"/>
+ <param name="max" type="int"/>
+ <doc>
+ <![CDATA[Set the min, max number of worker threads (simultaneous connections).]]>
+ </doc>
+ </method>
+ <method name="addSslListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)}">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="keystore" type="java.lang.String"/>
+ <param name="storPass" type="java.lang.String"/>
+ <param name="keyPass" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Configure an ssl listener on the server.
+ @param addr address to listen on
+ @param keystore location of the keystore
+ @param storPass password for the keystore
+ @param keyPass password for the key
+ @deprecated Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)}]]>
+ </doc>
+ </method>
+ <method name="addSslListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="sslConf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="needClientAuth" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Configure an ssl listener on the server.
+ @param addr address to listen on
+ @param sslConf conf to retrieve ssl options
+ @param needClientAuth whether client authentication is required]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start the server. Does not wait for the server to start.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Stop the server.]]>
+ </doc>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="webServer" type="org.mortbay.jetty.Server"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="listener" type="org.mortbay.jetty.Connector"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="webAppContext" type="org.mortbay.jetty.webapp.WebAppContext"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="findPort" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="defaultContexts" type="java.util.Map"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="filterNames" type="java.util.List"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Create a Jetty embedded server to answer http requests. The primary goal
+ is to serve up status information for the server.
+ There are three contexts:
+ "/logs/" -> points to the log directory
+ "/static/" -> points to common static files (src/webapps/static)
+ "/" -> the jsp server code from (src/webapps/<name>)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.HttpServer -->
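+ <!-- A sketch of standing up the embedded status server. The webapp name and
+ port are placeholders; for a real deployment the name must match a directory
+ under src/webapps, and findPort=true probes upward from the given port until
+ a free one is found.
+
+ import org.apache.hadoop.http.HttpServer;
+
+ public class HttpSketch {
+ public static void main(String[] args) throws Exception {
+ HttpServer server = new HttpServer("status", "0.0.0.0", 50070, true);
+ server.setAttribute("started.at", System.currentTimeMillis());
+ server.start(); // returns without waiting for the server to come up
+ System.out.println("listening on port " + server.getPort());
+ server.join(); // block until the server stops
+ }
+ }
+ -->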
+ <!-- start class org.apache.hadoop.http.HttpServer.StackServlet -->
+ <class name="HttpServer.StackServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HttpServer.StackServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A very simple servlet to serve up a text representation of the current
+ stack traces. It both returns the stacks to the caller and logs them.
+ Currently the stack traces are captured sequentially, so they do not
+ form a single consistent snapshot across threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.http.HttpServer.StackServlet -->
+</package>
+<package name="org.apache.hadoop.io">
+ <!-- start class org.apache.hadoop.io.AbstractMapWritable -->
+ <class name="AbstractMapWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="AbstractMapWritable"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="addToMap"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a Class to the maps if it is not already present.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="byte"/>
+ <doc>
+ <![CDATA[@return the Class object for the specified id]]>
+ </doc>
+ </method>
+ <method name="getId" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[@return the id for the specified Class]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Used by child copy constructors.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the conf]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[@param conf the conf to set]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Abstract base class for MapWritable and SortedMapWritable
+
+ Unlike org.apache.nutch.crawl.MapWritable, this class allows creation of
+ MapWritable&lt;Writable, MapWritable&gt; so the CLASS_TO_ID and ID_TO_CLASS
+ maps travel with the class instead of being static.
+
+ Class ids range from 1 to 127 so there can be at most 127 distinct classes
+ in any specific map instance.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.AbstractMapWritable -->
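+ <!-- A sketch using MapWritable, the concrete subclass this base class exists
+ for. Keys and values may be heterogeneous Writables because the class-to-id
+ table described above travels with each instance; MapWritable is assumed to
+ expose the java.util.Map put/get operations.
+
+ import org.apache.hadoop.io.IntWritable;
+ import org.apache.hadoop.io.MapWritable;
+ import org.apache.hadoop.io.Text;
+
+ public class MapWritableSketch {
+ public static void main(String[] args) {
+ MapWritable map = new MapWritable();
+ map.put(new Text("count"), new IntWritable(42));
+ map.put(new IntWritable(7), new Text("seven"));
+ System.out.println(map.get(new Text("count")));
+ }
+ }
+ -->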
+ <!-- start class org.apache.hadoop.io.ArrayFile -->
+ <class name="ArrayFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A dense file-based mapping from integers to values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Reader -->
+ <class name="ArrayFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an array reader for the named file.]]>
+ </doc>
+ </constructor>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader before its <code>n</code>th value.]]>
+ </doc>
+ </method>
+ <method name="next" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read and return the next value in the file.]]>
+ </doc>
+ </method>
+ <method name="key" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the key associated with the most recent call to {@link
+ #seek(long)}, {@link #next(Writable)}, or {@link
+ #get(long,Writable)}.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the <code>n</code>th value in the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Reader -->
+ <!-- start class org.apache.hadoop.io.ArrayFile.Writer -->
+ <class name="ArrayFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file for values of the named class.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a value to the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new array file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayFile.Writer -->
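+ <!-- A minimal usage sketch of the ArrayFile.Writer and ArrayFile.Reader
+ described above, assuming a local FileSystem and the illustrative path
+ "ints" (both assumptions, not part of the generated API record):
+
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.getLocal(conf);
+
+ // Write values 0..9; each value's key is its position in the file.
+ ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, "ints", IntWritable.class);
+ for (int i = 0; i < 10; i++) {
+   writer.append(new IntWritable(i));
+ }
+ writer.close();
+
+ // Random access by index, then sequential reads from a seek position.
+ ArrayFile.Reader reader = new ArrayFile.Reader(fs, "ints", conf);
+ IntWritable value = new IntWritable();
+ reader.get(7, value);               // value is now 7
+ reader.seek(3);                     // position before the 3rd value
+ while (reader.next(value) != null) {
+   long n = reader.key();            // index of the value just read
+ }
+ reader.close();
+ -->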
+ <!-- start class org.apache.hadoop.io.ArrayWritable -->
+ <class name="ArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="ArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayWritable" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for arrays containing instances of a class. The elements of this
+ writable must all be instances of the same class. If this writable will be
+ the input for a Reducer, you will need to create a subclass that sets the
+ value to be of the proper type.
+
+ For example:
+ <code>
+ public class IntArrayWritable extends ArrayWritable {
+ public IntArrayWritable() {
+ super(IntWritable.class);
+ }
+ }
+ </code>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ArrayWritable -->
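+ <!-- A short usage sketch of ArrayWritable: the element class is fixed at
+ construction, and get() returns the wrapped Writable[]:
+
+ ArrayWritable arr = new ArrayWritable(IntWritable.class,
+     new Writable[] { new IntWritable(1), new IntWritable(2) });
+
+ Writable[] values = arr.get();
+ int first = ((IntWritable) values[0]).get();   // 1
+ -->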
+ <!-- start class org.apache.hadoop.io.BinaryComparable -->
+ <class name="BinaryComparable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="BinaryComparable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLength" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return n such that bytes 0..n-1 from {@link #getBytes()} are valid.]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return representative byte array for this instance.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.BinaryComparable"/>
+ <doc>
+ <![CDATA[Compare bytes from {@link #getBytes()}.
+ @see org.apache.hadoop.io.WritableComparator#compareBytes(byte[],int,int,byte[],int,int)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Compare bytes from {@link #getBytes()} to those provided.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return true if bytes from {@link #getBytes()} match.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a hash of the bytes returned from {@link #getBytes()}.
+ @see org.apache.hadoop.io.WritableComparator#hashBytes(byte[],int)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface supported by {@link org.apache.hadoop.io.WritableComparable}
+ types supporting ordering/permutation by a representative set of bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BinaryComparable -->
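+ <!-- A hypothetical subclass sketch: once getLength() and getBytes() expose
+ the valid bytes, compareTo(), equals() and hashCode() are inherited from
+ BinaryComparable and operate directly on those bytes. (A real key would
+ typically also implement Writable.)
+
+ public class RawKey extends BinaryComparable {
+   private final byte[] data;
+
+   public RawKey(byte[] data) { this.data = data; }
+
+   public int getLength() { return data.length; }  // all bytes are valid
+   public byte[] getBytes() { return data; }
+ }
+ -->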
+ <!-- start class org.apache.hadoop.io.BloomMapFile -->
+ <class name="BloomMapFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BloomMapFile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="delete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="BLOOM_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HASH_COUNT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class extends {@link MapFile} and provides much the same
+ functionality. However, it uses dynamic Bloom filters to provide a
+ quick membership test for keys, and it offers a fast version of the
+ {@link Reader#get(WritableComparable, Writable)} operation, especially in
+ the case of sparsely populated MapFiles.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BloomMapFile -->
+ <!-- start class org.apache.hadoop.io.BloomMapFile.Reader -->
+ <class name="BloomMapFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BloomMapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="probablyHasKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Checks if this MapFile has the indicated key. The membership test is
+ performed using a Bloom filter, so the result always has a non-zero
+ probability of false positives.
+ @param key key to check
+ @return false iff the key definitely does not exist; true if it probably exists.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fast version of the
+ {@link MapFile.Reader#get(WritableComparable, Writable)} method. First
+ it checks the Bloom filter for the existence of the key, and only if the
+ key is present does it perform the real get operation. This yields significant
+ performance improvements for get operations on sparsely populated files.]]>
+ </doc>
+ </method>
+ <method name="getBloomFilter" return="org.apache.hadoop.util.bloom.Filter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the Bloom filter used by this instance of the Reader.
+ @return a Bloom filter (see {@link Filter})]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.BloomMapFile.Reader -->
+ <!-- start class org.apache.hadoop.io.BloomMapFile.Writer -->
+ <class name="BloomMapFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="BloomMapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.BloomMapFile.Writer -->
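+ <!-- A minimal sketch tying the Writer and Reader above together, assuming a
+ local FileSystem and the illustrative directory "bloommap":
+
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.getLocal(conf);
+
+ BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs, "bloommap",
+     Text.class, IntWritable.class);
+ writer.append(new Text("alpha"), new IntWritable(1));  // keys in sorted order
+ writer.close();
+
+ BloomMapFile.Reader reader = new BloomMapFile.Reader(fs, "bloommap", conf);
+ IntWritable val = new IntWritable();
+ // A false answer is definitive, so the disk lookup can be skipped entirely;
+ // a true answer may still be a false positive, which get() resolves.
+ if (reader.probablyHasKey(new Text("alpha"))) {
+   Writable found = reader.get(new Text("alpha"), val);  // null on a false positive
+ }
+ reader.close();
+ -->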
+ <!-- start class org.apache.hadoop.io.BooleanWritable -->
+ <class name="BooleanWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BooleanWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BooleanWritable" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set the value of the BooleanWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the value of the BooleanWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for booleans.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable -->
+ <!-- start class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <class name="BooleanWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BooleanWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BooleanWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BooleanWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.BytesWritable -->
+ <class name="BytesWritable" extends="org.apache.hadoop.io.BinaryComparable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-size sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="BytesWritable" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a BytesWritable using the byte array as the initial value.
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the BytesWritable.
+ @return the data, which is only valid between indices 0 and getLength() - 1.]]>
+ </doc>
+ </method>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #getBytes()} instead.">
+ <doc>
+ <![CDATA[Get the data from the BytesWritable.
+ @deprecated Use {@link #getBytes()} instead.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current size of the buffer.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #getLength()} instead.">
+ <doc>
+ <![CDATA[Get the current size of the buffer.
+ @deprecated Use {@link #getLength()} instead.]]>
+ </doc>
+ </method>
+ <method name="setSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Change the size of the buffer. The values in the old range are preserved
+ and any new values are undefined. The capacity is changed if necessary.
+ @param size The new number of bytes]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum size that could be handled without
+ resizing the backing storage.
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="new_cap" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved.
+ @param new_cap The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="org.apache.hadoop.io.BytesWritable"/>
+ <doc>
+ <![CDATA[Set the BytesWritable to the contents of the given newData.
+ @param newData the value to set this BytesWritable to.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newData" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Set the value to a copy of the given byte range
+ @param newData the new values to copy in
+ @param offset the offset in newData to start at
+ @param length the number of bytes to copy]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="right_obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Are the two byte sequences equal?]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Generate the stream of bytes as hex pairs separated by ' '.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is usable as a key or value.
+ It is resizable and distinguishes between the size of the sequence and
+ the current capacity. The hash function is the front of the MD5 of the
+ buffer. The sort order is the same as memcmp.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable -->
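+ <!-- A short sketch of the size/capacity distinction described above:
+
+ BytesWritable buf = new BytesWritable();
+ byte[] payload = "hello".getBytes();
+
+ buf.set(payload, 0, payload.length);  // copies the bytes in; size is now 5
+ buf.setSize(3);                       // keep only the first three bytes
+
+ byte[] backing = buf.getBytes();      // valid in [0, buf.getLength())
+ int capacity = buf.getCapacity();     // may exceed getLength()
+ -->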
+ <!-- start class org.apache.hadoop.io.BytesWritable.Comparator -->
+ <class name="BytesWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BytesWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for BytesWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.BytesWritable.Comparator -->
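+ <!-- A sketch of raw comparison: the registered comparator is obtained via
+ WritableComparator.get() and compares the serialized forms directly,
+ without deserializing either operand (DataOutputBuffer is described later
+ in this file):
+
+ WritableComparator cmp = WritableComparator.get(BytesWritable.class);
+
+ DataOutputBuffer b1 = new DataOutputBuffer();
+ DataOutputBuffer b2 = new DataOutputBuffer();
+ new BytesWritable("a".getBytes()).write(b1);
+ new BytesWritable("b".getBytes()).write(b2);
+
+ int c = cmp.compare(b1.getData(), 0, b1.getLength(),
+                     b2.getData(), 0, b2.getLength());  // negative: "a" sorts first
+ -->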
+ <!-- start class org.apache.hadoop.io.ByteWritable -->
+ <class name="ByteWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="ByteWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ByteWritable" type="byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Set the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this ByteWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a ByteWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two ByteWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for a single byte.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable -->
+ <!-- start class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <class name="ByteWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ByteWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for ByteWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ByteWritable.Comparator -->
+ <!-- start interface org.apache.hadoop.io.Closeable -->
+ <interface name="Closeable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="use java.io.Closeable">
+ <implements name="java.io.Closeable"/>
+ <doc>
+ <![CDATA[@deprecated use java.io.Closeable]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Closeable -->
+ <!-- start class org.apache.hadoop.io.CompressedWritable -->
+ <class name="CompressedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="CompressedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ensureInflated"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Must be called by all methods which access fields to ensure that the data
+ has been uncompressed.]]>
+ </doc>
+ </method>
+ <method name="readFieldsCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #readFields(DataInput)}.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressed"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Subclasses implement this instead of {@link #write(DataOutput)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base-class for Writables which store themselves compressed and lazily
+ inflate on field access. This is useful for large objects whose fields are
+ not altered during a map or reduce operation: leaving the field data
+ compressed makes copying the instance from one file to another much
+ faster.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.CompressedWritable -->
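+ <!-- A hypothetical subclass sketch: fields stay compressed until first use,
+ and every accessor calls ensureInflated() before touching them:
+
+ public class BigRecord extends CompressedWritable {
+   private Text body = new Text();
+
+   protected void readFieldsCompressed(DataInput in) throws IOException {
+     body.readFields(in);
+   }
+
+   protected void writeCompressed(DataOutput out) throws IOException {
+     body.write(out);
+   }
+
+   public Text getBody() {
+     ensureInflated();  // required before any field access
+     return body;
+   }
+ }
+ -->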
+ <!-- start class org.apache.hadoop.io.DataInputBuffer -->
+ <class name="DataInputBuffer" extends="java.io.DataInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataInputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataInput} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataInputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataInputBuffer buffer = new DataInputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using DataInput methods ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataInputBuffer -->
+ <!-- start class org.apache.hadoop.io.DataOutputBuffer -->
+ <class name="DataOutputBuffer" extends="java.io.DataOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DataOutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <constructor name="DataOutputBuffer" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from a DataInput directly into the buffer.]]>
+ </doc>
+ </method>
+ <method name="writeTo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the buffered data to the given output stream.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link DataOutput} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new DataOutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ DataOutputBuffer buffer = new DataOutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using DataOutput methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DataOutputBuffer -->
+ <!-- start class org.apache.hadoop.io.DefaultStringifier -->
+ <class name="DefaultStringifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Stringifier"/>
+ <constructor name="DefaultStringifier" type="org.apache.hadoop.conf.Configuration, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="fromString" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="store"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="item" type="java.lang.Object"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the item in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to store the item in
+ @param item the object to be stored
+ @param keyName the name of the key to use
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="load" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="storeArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="items" type="java.lang.Object[]"/>
+ <param name="keyName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Stores the array of items in the configuration with the given keyName.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param items the objects to be stored
+ @param keyName the name of the key to use
+ @throws IndexOutOfBoundsException if the items array is empty
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <method name="loadArray" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="keyName" type="java.lang.String"/>
+ <param name="itemClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the array of objects from the configuration.
+
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException : forwards Exceptions from the underlying
+ {@link Serialization} classes.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[DefaultStringifier is the default implementation of the {@link Stringifier}
+ interface which stringifies the objects using base64 encoding of the
+ serialized version of the objects. The {@link Serializer} and
+ {@link Deserializer} are obtained from the {@link SerializationFactory}.
+ <br>
+ DefaultStringifier offers convenience methods to store/load objects to/from
+ the configuration.
+
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DefaultStringifier -->
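+ <!-- A short sketch of the store()/load() convenience methods above; the key
+ name "my.key" is an arbitrary example:
+
+ Configuration conf = new Configuration();
+
+ // store() serializes the item, base64-encodes it, and sets it under the key;
+ // load() reverses the process.
+ DefaultStringifier.store(conf, new IntWritable(42), "my.key");
+ IntWritable restored =
+     DefaultStringifier.load(conf, "my.key", IntWritable.class);
+ -->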
+ <!-- start class org.apache.hadoop.io.DoubleWritable -->
+ <class name="DoubleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="DoubleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DoubleWritable" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="double"/>
+ </method>
+ <method name="get" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a DoubleWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Writable for Double values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable -->
+ <!-- start class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <class name="DoubleWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DoubleWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for DoubleWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.DoubleWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.FloatWritable -->
+ <class name="FloatWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="FloatWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FloatWritable" type="float"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="float"/>
+ <doc>
+ <![CDATA[Set the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this FloatWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a FloatWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two FloatWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for floats.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable -->
+ <!-- start class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <class name="FloatWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FloatWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for FloatWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.FloatWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.GenericWritable -->
+ <class name="GenericWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="GenericWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[Set the instance that is wrapped.
+
+ @param obj the Writable instance to wrap]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the wrapped instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTypes" return="java.lang.Class[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return all classes that may be wrapped. Subclasses should implement this
+ to return a constant array of classes.]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper for Writable instances.
+ <p>
+ When two sequence files, which have the same Key type but different Value
+ types, are mapped out to reduce, multiple Value types are not allowed.
+ In this case, this class can help you wrap instances with different types.
+ </p>
+
+ <p>
+ Compared with <code>ObjectWritable</code>, this class is much more efficient,
+ because <code>ObjectWritable</code> will append the class declaration as a String
+ into the output file in every Key-Value pair.
+ </p>
+
+ <p>
+ Generic Writable implements the {@link Configurable} interface, so that it will be
+ configured by the framework. The configuration is passed to the wrapped objects
+ implementing {@link Configurable} interface <i>before deserialization</i>.
+ </p>
+
+ How to use it: <br>
+ 1. Write your own class, such as GenericObject, which extends GenericWritable.<br>
+ 2. Implement the abstract method <code>getTypes()</code>, defining
+ the classes which will be wrapped in GenericObject in your application.
+ Note that the classes defined in <code>getTypes()</code> must
+ implement the <code>Writable</code> interface.
+ <br><br>
+
+ The code looks like this:
+ <blockquote><pre>
+ public class GenericObject extends GenericWritable {
+
+ private static Class[] CLASSES = {
+ ClassType1.class,
+ ClassType2.class,
+ ClassType3.class,
+ };
+
+ protected Class[] getTypes() {
+ return CLASSES;
+ }
+
+ }
+ </pre></blockquote>
+
+ @since Nov 8, 2006]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.GenericWritable -->
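+ <!-- A usage sketch built on the GenericObject example from the javadoc above
+ (ClassType1 stands for any Writable listed in getTypes()):
+
+ GenericObject wrapper = new GenericObject();
+ wrapper.set(new ClassType1());     // wrap whichever concrete type is at hand
+
+ Writable wrapped = wrapper.get();  // unwrap on the receiving side
+ if (wrapped instanceof ClassType1) {
+   ClassType1 obj = (ClassType1) wrapped;
+ }
+ -->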
+ <!-- start class org.apache.hadoop.io.InputBuffer -->
+ <class name="InputBuffer" extends="java.io.FilterInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Resets the data that the buffer reads.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current position in the input.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link InputStream} implementation that reads from an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new InputStream and
+ ByteArrayInputStream each time data is read.
+
+ <p>Typical usage is something like the following:<pre>
+
+ InputBuffer buffer = new InputBuffer();
+ while (... loop condition ...) {
+ byte[] data = ... get data ...;
+ int dataLength = ... get data length ...;
+ buffer.reset(data, dataLength);
+ ... read buffer using InputStream methods ...
+ }
+ </pre>
+ @see DataInputBuffer
+ @see DataOutput]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.InputBuffer -->
+ <!-- start class org.apache.hadoop.io.IntWritable -->
+ <class name="IntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="IntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="IntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this IntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an IntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two IntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for ints.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable -->
+ <!-- start class org.apache.hadoop.io.IntWritable.Comparator -->
+ <class name="IntWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IntWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for IntWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IntWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.IOUtils -->
+ <class name="IOUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="buffSize" type="int"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param buffSize the size of the buffer
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another. <strong>Closes the input and
+ output streams at the end</strong>.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object]]>
+ </doc>
+ </method>
+ <method name="copyBytes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="close" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copies from one stream to another.
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object
+ @param close whether or not to close the InputStream and
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+ </doc>
+ </method>
+ <method name="readFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="buf" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads len bytes in a loop.
+ @param in The InputStream to read from
+ @param buf The buffer to fill
+ @param off offset into the buffer
+ @param len the number of bytes to read
+ @throws IOException if it could not read the requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="len" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Similar to readFully(). Skips bytes in a loop.
+ @param in The InputStream to skip bytes from
+ @param len number of bytes to skip.
+ @throws IOException if it could not skip the requested number of bytes
+ for any reason (including EOF)]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="closeables" type="java.io.Closeable[]"/>
+ <doc>
+ <![CDATA[Close the Closeable objects and <b>ignore</b> any {@link IOException} or
+ null pointers. Must only be used for cleanup in exception handlers.
+ @param log the log to record problems to at debug level. Can be null.
+ @param closeables the objects to close]]>
+ </doc>
+ </method>
+ <method name="closeStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Closeable"/>
+ <doc>
+ <![CDATA[Closes the stream ignoring {@link IOException}.
+ Must only be called when cleaning up from exception handlers.
+ @param stream the Stream to close]]>
+ </doc>
+ </method>
+ <method name="closeSocket"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sock" type="java.net.Socket"/>
+ <doc>
+ <![CDATA[Closes the socket ignoring {@link IOException}.
+ @param sock the Socket to close]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility class for I/O-related functionality.
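+
+ <p>A minimal sketch of copying one stream into another (illustrative only;
+ the file names and buffer size are hypothetical):<pre>
+
+ InputStream in = new FileInputStream("src");   // hypothetical source
+ OutputStream out = new FileOutputStream("dst"); // hypothetical destination
+ // copy with a 4096-byte buffer; 'true' closes both streams when done
+ IOUtils.copyBytes(in, out, 4096, true);
+ </pre>]]>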
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils -->
+ <!-- start class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <class name="IOUtils.NullOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IOUtils.NullOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[/dev/null of OutputStreams.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.IOUtils.NullOutputStream -->
+ <!-- start class org.apache.hadoop.io.LongWritable -->
+ <class name="LongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="LongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this LongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a LongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two LongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable -->
+ <!-- start class org.apache.hadoop.io.LongWritable.Comparator -->
+ <class name="LongWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <class name="LongWritable.DecreasingComparator" extends="org.apache.hadoop.io.LongWritable.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongWritable.DecreasingComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A decreasing Comparator optimized for LongWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.LongWritable.DecreasingComparator -->
+ <!-- start class org.apache.hadoop.io.MapFile -->
+ <class name="MapFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="rename"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="oldName" type="java.lang.String"/>
+ <param name="newName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Renames an existing map directory.]]>
+ </doc>
+ </method>
+ <method name="delete"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deletes the named map file.]]>
+ </doc>
+ </method>
+ <method name="fix" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valueClass" type="java.lang.Class"/>
+ <param name="dryrun" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This method attempts to fix a corrupt MapFile by re-creating its index.
+ @param fs filesystem
+ @param dir directory containing the MapFile data and index
+ @param keyClass key class (has to be a subclass of Writable)
+ @param valueClass value class (has to be a subclass of Writable)
+ @param dryrun if true, do not perform any changes, just report what needs to be done
+ @return number of valid entries in this MapFile, or -1 if no fixing was needed
+ @throws Exception]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <field name="INDEX_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the index file.]]>
+ </doc>
+ </field>
+ <field name="DATA_FILE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The name of the data file.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A file-based map from keys to values.
+
+ <p>A map is a directory containing two files, the <code>data</code> file,
+ containing all keys and values in the map, and a smaller <code>index</code>
+ file, containing a fraction of the keys. The fraction is determined by
+ {@link Writer#getIndexInterval()}.
+
+ <p>The index file is read entirely into memory. Thus key implementations
+ should try to keep themselves small.
+
+ <p>Map files are created by adding entries in order. To maintain a large
+ database, perform updates by copying the previous version of the database
+ and merging in a sorted change list, creating a new version of the
+ database in a new file. Large change lists can be sorted with {@link
+ SequenceFile.Sorter}.
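+
+ <p>A minimal usage sketch (illustrative only; the directory name, key and
+ value types, and Configuration are hypothetical):<pre>
+
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.get(conf);
+ // "example.map" is a hypothetical map directory name
+ MapFile.Writer writer =
+   new MapFile.Writer(conf, fs, "example.map", IntWritable.class, Text.class);
+ writer.append(new IntWritable(1), new Text("one")); // keys added in order
+ writer.close();
+
+ MapFile.Reader reader = new MapFile.Reader(fs, "example.map", conf);
+ Text value = new Text();
+ reader.get(new IntWritable(1), value); // value now holds "one"
+ reader.close();
+ </pre>]]>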
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile -->
+ <!-- start class org.apache.hadoop.io.MapFile.Reader -->
+ <class name="MapFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a map reader for the named map using the named comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Hook to allow subclasses to defer opening streams until further
+ initialization is complete.
+ @see #createDataFileReader(FileSystem, Path, Configuration)]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="open"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dirName" type="java.lang.String"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDataFileReader" return="org.apache.hadoop.io.SequenceFile.Reader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dataFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link SequenceFile.Reader} returned.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Re-positions the reader before its first key.]]>
+ </doc>
+ </method>
+ <method name="midKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the key at approximately the middle of the file.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="finalKey"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the final key from the file.
+
+ @param key key to read into]]>
+ </doc>
+ </method>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Positions the reader at the named key or, if no such key exists, at
+ the first entry after the named key. Returns true iff the named key
+ exists in this map.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the map into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+ the end of the map.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the value for the named key, or null if none exists.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+ Returns <code>key</code> or, if it does not exist, the first entry
+ after the named key.
+
+ @param key - key that we're trying to find
+ @param val - data value if key is found
+ @return - the key that was the closest match or null if eof.]]>
+ </doc>
+ </method>
+ <method name="getClosest" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <param name="before" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finds the record that is the closest match to the specified key.
+
+ @param key - key that we're trying to find
+ @param val - data value if key is found
+ @param before - if true, and <code>key</code> does not exist, return
+ the first entry that falls just before the <code>key</code>. Otherwise,
+ return the record that sorts just after.
+ @return - the key that was the closest match or null if eof.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Reader -->
+ <!-- start class org.apache.hadoop.io.MapFile.Writer -->
+ <class name="MapFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map for keys of the named class.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named map using the named key comparator.]]>
+ </doc>
+ </constructor>
+ <method name="getIndexInterval" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of entries that are added before an index entry is added.]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="setIndexInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="interval" type="int"/>
+ <doc>
+ <![CDATA[Sets the index interval and stores it in conf.
+ @see #getIndexInterval()]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the map.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair to the map. The key must be greater than or
+ equal to the previous key added to the map.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writes a new map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapFile.Writer -->
+ <!-- start class org.apache.hadoop.io.MapWritable -->
+ <class name="MapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Map"/>
+ <constructor name="MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="MapWritable" type="org.apache.hadoop.io.MapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable Map.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MapWritable -->
+ <!-- start class org.apache.hadoop.io.MD5Hash -->
+ <class name="MD5Hash" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="MD5Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash from a hex string.]]>
+ </doc>
+ </constructor>
+ <constructor name="MD5Hash" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an MD5Hash with a specified value.]]>
+ </doc>
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs, reads and returns an instance.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Copy the contents of another instance into this instance.]]>
+ </doc>
+ </method>
+ <method name="getDigest" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the digest bytes.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a hash value for the content from the InputStream.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Construct a hash value for a byte array.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Construct a hash value for a String.]]>
+ </doc>
+ </method>
+ <method name="halfDigest" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a half-sized version of this MD5. Fits in a long.]]>
+ </doc>
+ </method>
+ <method name="quarterDigest" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a 32-bit digest of the MD5.
+ @return the first 4 bytes of the md5]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is an MD5Hash whose digest contains the
+ same values.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code value for this object.
+ Only uses the first 4 bytes, since md5s are evenly distributed.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+ <doc>
+ <![CDATA[Compares this object with the specified object for order.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="setDigest"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the digest value from a hex string.]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Writable for MD5 hash values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash -->
+ <!-- start class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <class name="MD5Hash.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MD5Hash.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for MD5Hash keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MD5Hash.Comparator -->
+ <!-- start class org.apache.hadoop.io.MultipleIOException -->
+ <class name="MultipleIOException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getExceptions" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the underlying exceptions]]>
+ </doc>
+ </method>
+ <method name="createIOException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="exceptions" type="java.util.List"/>
+ <doc>
+ <![CDATA[A convenience method to create an {@link IOException}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Encapsulates a list of {@link IOException}s in a single {@link IOException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.MultipleIOException -->
+ <!-- start class org.apache.hadoop.io.NullWritable -->
+ <class name="NullWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <method name="get" return="org.apache.hadoop.io.NullWritable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the single instance of this class.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Singleton Writable with no data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable -->
+ <!-- start class org.apache.hadoop.io.NullWritable.Comparator -->
+ <class name="NullWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullWritable.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Compare the buffers in serialized form.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator &quot;optimized&quot; for NullWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.NullWritable.Comparator -->
+ <!-- start class org.apache.hadoop.io.ObjectWritable -->
+ <class name="ObjectWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ObjectWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ObjectWritable" type="java.lang.Class, java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the instance, or null if none.]]>
+ </doc>
+ </method>
+ <method name="getDeclaredClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the class this is meant to be.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Reset the instance.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeObject"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="instance" type="java.lang.Object"/>
+ <param name="declaredClass" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="readObject" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="objectWritable" type="org.apache.hadoop.io.ObjectWritable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A polymorphic Writable that writes an instance with its class name.
+ Handles arrays, strings and primitive types without a Writable wrapper.
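+
+ <p>A serialization round-trip sketch (illustrative only; the value and
+ buffers are hypothetical):<pre>
+
+ Configuration conf = new Configuration();
+ DataOutputBuffer out = new DataOutputBuffer();
+ ObjectWritable.writeObject(out, "forty-two", String.class, conf);
+
+ DataInputBuffer in = new DataInputBuffer();
+ in.reset(out.getData(), out.getLength());
+ String copy = (String) ObjectWritable.readObject(in, conf);
+ </pre>]]>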
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.ObjectWritable -->
+ <!-- start class org.apache.hadoop.io.OutputBuffer -->
+ <class name="OutputBuffer" extends="java.io.FilterOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new empty buffer.]]>
+ </doc>
+ </constructor>
+ <method name="getData" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the current contents of the buffer.
+ Data is only valid to {@link #getLength()}.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the length of the valid data currently in the buffer.]]>
+ </doc>
+ </method>
+ <method name="reset" return="org.apache.hadoop.io.OutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets the buffer to empty.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes bytes from an InputStream directly into the buffer.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A reusable {@link OutputStream} implementation that writes to an in-memory
+ buffer.
+
+ <p>This saves memory over creating a new OutputStream and
+ ByteArrayOutputStream each time data is written.
+
+ <p>Typical usage is something like the following:<pre>
+
+ OutputBuffer buffer = new OutputBuffer();
+ while (... loop condition ...) {
+ buffer.reset();
+ ... write buffer using OutputStream methods ...
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ ... write data to its ultimate destination ...
+ }
+ </pre>
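+
+ <p>A concrete sketch of the same pattern (illustrative only; the payload
+ is hypothetical):<pre>
+
+ OutputBuffer buffer = new OutputBuffer();
+ buffer.reset();
+ buffer.write("hello".getBytes()); // any OutputStream method works here
+ byte[] data = buffer.getData();
+ int dataLength = buffer.getLength();
+ </pre>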
+ @see DataOutputBuffer
+ @see InputBuffer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.OutputBuffer -->
+ <!-- start interface org.apache.hadoop.io.RawComparator -->
+ <interface name="RawComparator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.Comparator"/>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link Comparator} that operates directly on byte representations of
+ objects.
+ </p>
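+ <p>For instance, two serialized IntWritables can be compared without
+ deserializing them (a sketch; the buffers are hypothetical):<pre>
+
+ DataOutputBuffer buf1 = new DataOutputBuffer();
+ new IntWritable(1).write(buf1);
+ DataOutputBuffer buf2 = new DataOutputBuffer();
+ new IntWritable(2).write(buf2);
+ RawComparator comparator = new IntWritable.Comparator();
+ int c = comparator.compare(buf1.getData(), 0, buf1.getLength(),
+                            buf2.getData(), 0, buf2.getLength()); // negative: 1 sorts before 2
+ </pre>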
+ @param <T>
+ @see DeserializerComparator]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.RawComparator -->
+ <!-- start class org.apache.hadoop.io.SequenceFile -->
+ <class name="SequenceFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the compression type for the reduce outputs.
+ @param job the job config to look in
+ @return the kind of compression to use
+ @deprecated Use
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)}
+ to get {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use the one of the many SequenceFile.createWriter methods to specify
+ the {@link CompressionType} while creating the {@link SequenceFile} or
+ {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+ to specify the {@link CompressionType} for job-outputs.
+ or">
+ <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the compression type for sequence files.
+ @param job the configuration to modify
+ @param val the new compression type (none, block, record)
+      @deprecated Use one of the many SequenceFile.createWriter methods to specify
+      the {@link CompressionType} while creating the {@link SequenceFile} or
+      {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)}
+      to specify the {@link CompressionType} for job-outputs.]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="bufferSize" type="int"/>
+ <param name="replication" type="short"/>
+ <param name="blockSize" type="long"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+      @param bufferSize buffer size for the underlying output stream.
+ @param replication replication factor for the file.
+ @param blockSize block size for the file.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="name" type="org.apache.hadoop.fs.Path"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valClass" type="java.lang.Class"/>
+ <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="SYNC_INTERVAL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes between sync points.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<code>SequenceFile</code>s are flat files consisting of binary key/value
+ pairs.
+
+ <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
+ {@link Sorter} classes for writing, reading and sorting respectively.</p>
+
+ There are three <code>SequenceFile</code> <code>Writer</code>s based on the
+ {@link CompressionType} used to compress key/value pairs:
+ <ol>
+ <li>
+ <code>Writer</code> : Uncompressed records.
+ </li>
+ <li>
+ <code>RecordCompressWriter</code> : Record-compressed files, only compress
+ values.
+ </li>
+ <li>
+ <code>BlockCompressWriter</code> : Block-compressed files, both keys &
+ values are collected in 'blocks'
+ separately and compressed. The size of
+      the 'block' is configurable.
+      </li>
+      </ol>
+
+ <p>The actual compression algorithm used to compress key and/or values can be
+ specified by using the appropriate {@link CompressionCodec}.</p>
+
+ <p>The recommended way is to use the static <tt>createWriter</tt> methods
+      provided by the <code>SequenceFile</code> to choose the preferred format.</p>
+
+ <p>The {@link Reader} acts as the bridge and can read any of the above
+ <code>SequenceFile</code> formats.</p>
+
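+      <p>A minimal write/read sketch (the path and key/value types below are
+      illustrative, not prescriptive):<pre>
+
+        Configuration conf = new Configuration();
+        FileSystem fs = FileSystem.get(conf);
+        Path file = new Path("data.seq");          // hypothetical file name
+
+        SequenceFile.Writer writer =
+          SequenceFile.createWriter(fs, conf, file, IntWritable.class, Text.class);
+        writer.append(new IntWritable(1), new Text("one"));
+        writer.close();
+
+        SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
+        IntWritable key = new IntWritable();
+        Text value = new Text();
+        while (reader.next(key, value)) {
+          ... use key and value ...
+        }
+        reader.close();
+      </pre>
+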
+ <h4 id="Formats">SequenceFile Formats</h4>
+
+ <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
+ depending on the <code>CompressionType</code> specified. All of them share a
+ <a href="#Header">common header</a> described below.
+
+ <h5 id="Header">SequenceFile Header</h5>
+ <ul>
+ <li>
+ version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual
+ version number (e.g. SEQ4 or SEQ6)
+ </li>
+ <li>
+      keyClassName - key class
+ </li>
+ <li>
+ valueClassName - value class
+ </li>
+ <li>
+ compression - A boolean which specifies if compression is turned on for
+ keys/values in this file.
+ </li>
+ <li>
+ blockCompression - A boolean which specifies if block-compression is
+ turned on for keys/values in this file.
+ </li>
+ <li>
+ compression codec - <code>CompressionCodec</code> class which is used for
+ compression of keys and/or values (if compression is
+ enabled).
+ </li>
+ <li>
+ metadata - {@link Metadata} for this file.
+ </li>
+ <li>
+ sync - A sync marker to denote end of the header.
+ </li>
+ </ul>
+
+ <h5 id="#UncompressedFormat">Uncompressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li>Value</li>
+ </ul>
+ </li>
+ <li>
+      A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+ <ul>
+ <li>Record length</li>
+ <li>Key length</li>
+ <li>Key</li>
+ <li><i>Compressed</i> Value</li>
+ </ul>
+ </li>
+ <li>
+      A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <h5 id="#BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record <i>Block</i>
+ <ul>
+ <li>Compressed key-lengths block-size</li>
+ <li>Compressed key-lengths block</li>
+ <li>Compressed keys block-size</li>
+ <li>Compressed keys block</li>
+ <li>Compressed value-lengths block-size</li>
+ <li>Compressed value-lengths block</li>
+ <li>Compressed values block-size</li>
+ <li>Compressed values block</li>
+ </ul>
+ </li>
+ <li>
+      A sync-marker every few hundred bytes or so.
+ </li>
+ </ul>
+
+ <p>The compressed blocks of key lengths and value lengths consist of the
+ actual lengths of individual keys/values encoded in ZeroCompressedInteger
+ format.</p>
+
+ @see CompressionCodec]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <class name="SequenceFile.CompressionType" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.SequenceFile.CompressionType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NONE" type="org.apache.hadoop.io.SequenceFile.CompressionType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do not compress records.]]>
+ </doc>
+ </field>
+ <field name="RECORD" type="org.apache.hadoop.io.SequenceFile.CompressionType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compress values only, each separately.]]>
+ </doc>
+ </field>
+ <field name="BLOCK" type="org.apache.hadoop.io.SequenceFile.CompressionType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compress sequences of records together in blocks.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The compression type used to compress key/value pairs in the
+ {@link SequenceFile}.
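+
+      <p>For example (a sketch; assumes <code>fs</code>, <code>conf</code> and
+      <code>file</code> are already set up), a block-compressed file can be
+      requested at creation time:<pre>
+
+        SequenceFile.Writer writer =
+          SequenceFile.createWriter(fs, conf, file, IntWritable.class, Text.class,
+                                    SequenceFile.CompressionType.BLOCK);
+      </pre>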
+
+ @see SequenceFile.Writer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.CompressionType -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.Metadata -->
+ <class name="SequenceFile.Metadata" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFile.Metadata" type="java.util.TreeMap"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ </method>
+ <method name="getMetadata" return="java.util.TreeMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+      <![CDATA[The class encapsulating the metadata of a file.
+      The metadata of a file is a list of attribute name/value
+      pairs of Text type.
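+
+      <p>A minimal sketch (the attribute name is illustrative; assumes
+      <code>fs</code>, <code>conf</code>, <code>file</code>, <code>codec</code>
+      and <code>progress</code> are already set up) of attaching metadata at
+      creation time:<pre>
+
+        SequenceFile.Metadata meta = new SequenceFile.Metadata();
+        meta.set(new Text("created-by"), new Text("example"));
+        SequenceFile.Writer writer =
+          SequenceFile.createWriter(fs, conf, file, Text.class, Text.class,
+                                    SequenceFile.CompressionType.RECORD, codec,
+                                    progress, meta);
+      </pre>]]>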
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Metadata -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.Reader -->
+ <class name="SequenceFile.Reader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Reader" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the named file.]]>
+ </doc>
+ </constructor>
+ <method name="openFile" return="org.apache.hadoop.fs.FSDataInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <param name="bufferSize" type="int"/>
+ <param name="length" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Override this method to specialize the type of
+ {@link FSDataInputStream} returned.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the key class.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the value class.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="isCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if values are compressed.]]>
+ </doc>
+ </method>
+ <method name="isBlockCompressed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if records are block-compressed.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="getMetadata" return="org.apache.hadoop.io.SequenceFile.Metadata"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the metadata object of the file.]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the 'value' corresponding to the last read 'key'.
+ @param val : The 'value' to be read.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file into <code>key</code>, skipping its
+      value.  Returns true if another entry exists, and false at the end of the file.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key/value pair in the file into <code>key</code> and
+ <code>val</code>. Returns true if such a pair exists and false when at
+      the end of the file.]]>
+ </doc>
+ </method>
+ <method name="next" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.">
+ <param name="buffer" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.]]>
+ </doc>
+ </method>
+ <method name="createValueBytes" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRaw" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' records.
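+
+      <p>A sketch of a raw scan (assumes <code>reader</code> is an open Reader;
+      the buffers are reused across calls):<pre>
+
+        DataOutputBuffer key = new DataOutputBuffer();
+        SequenceFile.ValueBytes val = reader.createValueBytes();
+        while (reader.nextRaw(key, val) != -1) {
+          ... key.getData() holds key.getLength() bytes of raw key ...
+          key.reset();                  // clear the buffer for the next record
+        }
+      </pre>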
+ @param key - The buffer into which the key is read
+ @param val - The 'raw' value
+ @return Returns the total record length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawKey" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' keys.
+ @param key - The buffer into which the key is read
+ @return Returns the key length or -1 for end of file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="java.lang.Object"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in the file, skipping its
+      value.  Returns null at the end of the file.]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read 'raw' values.
+ @param val - The 'raw' value
+ @return Returns the value length
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the current byte position in the input file.
+
+ <p>The position passed must be a position returned by {@link
+ SequenceFile.Writer#getLength()} when writing this file. To seek to an arbitrary
+ position, use {@link SequenceFile.Reader#sync(long)}.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Seek to the next sync mark past a given position.]]>
+ </doc>
+ </method>
+ <method name="syncSeen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true iff the previous call to next passed a sync mark.]]>
+ </doc>
+ </method>
+ <method name="getPosition" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the current byte position in the input file.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the name of the file.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reads key/value pairs from a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Reader -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter -->
+ <class name="SequenceFile.Sorter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge files containing the named classes.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.io.RawComparator, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Sort and merge using an arbitrary {@link RawComparator}.]]>
+ </doc>
+ </constructor>
+ <method name="setFactor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="factor" type="int"/>
+ <doc>
+ <![CDATA[Set the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="getFactor" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of streams to merge at once.]]>
+ </doc>
+ </method>
+ <method name="setMemory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="memory" type="int"/>
+ <doc>
+ <![CDATA[Set the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="getMemory" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total amount of buffer memory, in bytes.]]>
+ </doc>
+ </method>
+ <method name="setProgressable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progressable" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Set the progressable object in order to report progress.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files into an output file.
+ @param inFiles the files to be sorted
+ @param outFile the sorted output file
+ @param deleteInput should the input files be deleted as they are read?]]>
+ </doc>
+ </method>
+ <method name="sortAndIterate" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInput" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Perform a file sort from a set of input files and return an iterator.
+ @param inFiles the files to be sorted
+ @param tempDir the directory where temp files are created during sort
+ @param deleteInput should the input files be deleted as they are read?
+      @return the RawKeyValueIterator over the sorted records]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The backwards compatible interface to sort.
+ @param inFile the input file to sort
+ @param outFile the sorted output file]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="segments" type="java.util.List"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Merges the list of segments of type <code>SegmentDescriptor</code>.
+ @param segments the list of SegmentDescriptors
+ @param tmpDir the directory to write temporary files into
+ @return RawKeyValueIterator
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Merges the contents of files passed in Path[] using the max factor
+      value that has already been set.
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param tmpDir the directory to write temporary files into
+      @return the RawKeyValueIterator over the merged records
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="deleteInputs" type="boolean"/>
+ <param name="factor" type="int"/>
+ <param name="tmpDir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Merges the contents of files passed in Path[].
+ @param inNames the array of path names
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+ @param factor the factor that will be used as the maximum merge fan-in
+ @param tmpDir the directory to write temporary files into
+      @return the RawKeyValueIterator over the merged records
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inNames" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="tempDir" type="org.apache.hadoop.fs.Path"/>
+ <param name="deleteInputs" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Merges the contents of files passed in Path[].
+ @param inNames the array of path names
+ @param tempDir the directory for creating temp files during merge
+ @param deleteInputs true if the input files should be deleted when
+ unnecessary
+      @return the RawKeyValueIterator over the merged records
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cloneFileAttributes" return="org.apache.hadoop.io.SequenceFile.Writer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="outputFile" type="org.apache.hadoop.fs.Path"/>
+ <param name="prog" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Clones the attributes (like compression) of the input file and creates a
+      corresponding Writer.
+ @param inputFile the path of the input file whose attributes should be
+ cloned
+ @param outputFile the path of the output file
+ @param prog the Progressable to report status during the file write
+ @return Writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="records" type="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"/>
+ <param name="writer" type="org.apache.hadoop.io.SequenceFile.Writer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes records from RawKeyValueIterator into a file represented by the
+      passed writer.
+ @param records the RawKeyValueIterator
+ @param writer the Writer created earlier
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="merge"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/>
+ <param name="outFile" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Merge the provided files.
+ @param inFiles the array of input path names
+ @param outFile the final output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sorts key/value pairs in a sequence-format file.
+
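+      <p>A minimal sketch (paths are illustrative; assumes <code>fs</code> and
+      <code>conf</code> are already configured):<pre>
+
+        SequenceFile.Sorter sorter =
+          new SequenceFile.Sorter(fs, IntWritable.class, Text.class, conf);
+        sorter.sort(new Path[] { new Path("in.seq") },    // input file(s)
+                    new Path("sorted.seq"),               // sorted output
+                    false);                               // keep the inputs
+      </pre>
+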
+ <p>For best performance, applications should make sure that the {@link
+ Writable#readFields(DataInput)} implementation of their keys is
+ very efficient. In particular, it should avoid allocating memory.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter -->
+ <!-- start interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
+ <interface name="SequenceFile.Sorter.RawKeyValueIterator" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Gets the current raw key.
+ @return DataOutputBuffer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getValue" return="org.apache.hadoop.io.SequenceFile.ValueBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Gets the current raw value.
+ @return ValueBytes
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Sets up the current key and value (for getKey and getValue).
+ @return true if there exists a key/value, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Closes the iterator so that the underlying streams can be closed.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the Progress object; this has a float (0.0 - 1.0)
+      indicating the fraction of bytes processed by the iterator so far.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[The interface to iterate over raw keys/values of SequenceFiles.
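+
+      <p>A typical consumption loop (a sketch; <code>iter</code> would come from
+      one of the <code>merge</code> or <code>sortAndIterate</code> calls above):<pre>
+
+        while (iter.next()) {
+          DataOutputBuffer key = iter.getKey();
+          SequenceFile.ValueBytes val = iter.getValue();
+          ... consume the raw key and value ...
+        }
+        iter.close();
+      </pre>]]>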
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <class name="SequenceFile.Sorter.SegmentDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="SequenceFile.Sorter.SegmentDescriptor" type="long, long, org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Constructs a segment.
+ @param segmentOffset the offset of the segment in the file
+ @param segmentLength the length of the segment
+ @param segmentPathName the path name of the file containing the segment]]>
+ </doc>
+ </constructor>
+ <method name="doSync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Do the sync checks.]]>
+ </doc>
+ </method>
+ <method name="preserveInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="preserve" type="boolean"/>
+ <doc>
+      <![CDATA[Whether to delete the input files when no longer needed.]]>
+ </doc>
+ </method>
+ <method name="shouldPreserveInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="nextRawKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Fills up the rawKey object with the key returned by the Reader.
+      @return true if there is a key returned; false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="nextRawValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rawValue" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Fills up the passed rawValue with the value corresponding to the key
+      read earlier.
+      @param rawValue the ValueBytes object to fill with the value
+ @return the length of the value
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the stored rawKey.]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The default cleanup. Subclasses can override this with a custom
+      cleanup.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class defines a merge segment. This class can be subclassed to
+ provide a customized cleanup method implementation. In this
+      implementation, cleanup closes the file handle and deletes the file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor -->
+ <!-- start interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
+ <interface name="SequenceFile.ValueBytes" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the uncompressed bytes to the outStream.
+ @param outStream : Stream to write uncompressed bytes into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to outStream.
+      Note that it will NOT compress the bytes if they are not already compressed.
+ @param outStream : Stream to write compressed bytes into.]]>
+ </doc>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Size of stored data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface to 'raw' values of SequenceFiles.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.SequenceFile.ValueBytes -->
+ <!-- start class org.apache.hadoop.io.SequenceFile.Writer -->
+ <class name="SequenceFile.Writer" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, int, short, long, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named file with write-progress reporter.]]>
+ </doc>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of keys in this file.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the class of values in this file.]]>
+ </doc>
+ </method>
+ <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compression codec of data in this file.]]>
+ </doc>
+ </method>
+ <method name="sync"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Create a sync point.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the file.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Writable"/>
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key/value pair.]]>
+ </doc>
+ </method>
+ <method name="appendRaw"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keyData" type="byte[]"/>
+ <param name="keyOffset" type="int"/>
+ <param name="keyLength" type="int"/>
+ <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+        <![CDATA[Returns the current length of the output file.
+
+        <p>This always returns a synchronized position.  In other words,
+        immediately after calling {@link SequenceFile.Reader#seek(long)} with a position
+        returned by this method, {@link SequenceFile.Reader#next(Writable)} may be called.  However,
+        the key may be earlier in the file than the key last written when this
+        method was called (e.g., with block-compression, it may be the first key
+        in the block that was being written when this method was called).
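+
+        <p>A sketch of recording a safe resume position while writing (the
+        writer and reader setup is omitted; names are illustrative):<pre>
+
+          long pos = writer.getLength();   // safe position to seek to later
+          writer.append(key, value);
+          ... later, on the reading side ...
+          reader.seek(pos);                // resume at the recorded position
+        </pre>]]>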
+ </doc>
+ </method>
+ <field name="keySerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="uncompressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="compressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Write key/value pairs to a sequence-format file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SequenceFile.Writer -->
+ <!-- start class org.apache.hadoop.io.SetFile -->
+ <class name="SetFile" extends="org.apache.hadoop.io.MapFile"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+      <![CDATA[A file-based set of keys.
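+
+      <p>A minimal sketch (the set name is illustrative; assumes
+      <code>conf</code> and <code>fs</code> are already set up).  Keys must be
+      appended in strictly increasing order:<pre>
+
+        SetFile.Writer writer =
+          new SetFile.Writer(conf, fs, "example.set", Text.class,
+                             SequenceFile.CompressionType.NONE);
+        writer.append(new Text("alpha"));
+        writer.append(new Text("beta"));   // must sort after the previous key
+        writer.close();
+      </pre>]]>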
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile -->
+ <!-- start class org.apache.hadoop.io.SetFile.Reader -->
+ <class name="SetFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a set reader for the named set using the named comparator.]]>
+ </doc>
+ </constructor>
+ <method name="seek" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the next key in a set into <code>key</code>. Returns
+ true if such a key exists and false when at the end of the set.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the matching key from a set into <code>key</code>.
+ Returns <code>key</code>, or null if no match exists.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide access to an existing set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Reader -->
+ <!-- start class org.apache.hadoop.io.SetFile.Writer -->
+ <class name="SetFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="pass a Configuration too">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the named set for keys of the named class.
+ @deprecated pass a Configuration too]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element class and compression type.]]>
+ </doc>
+ </constructor>
+ <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.io.SequenceFile.CompressionType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a set naming the element comparator and compression type.]]>
+ </doc>
+ </constructor>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Append a key to a set. The key must be strictly greater than the
+ previous key added to the set.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Write a new set file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SetFile.Writer -->
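+ <!-- A small sketch of the append ordering contract above; conf and fs are
+      assumed to be an existing Configuration and FileSystem:
+
+        SetFile.Writer writer = new SetFile.Writer(conf, fs, "/tmp/demo.set",
+            Text.class, SequenceFile.CompressionType.NONE);
+        writer.append(new Text("apple"));     // each key must be strictly
+        writer.append(new Text("banana"));    // greater than the previous one
+        writer.close();                       // close() comes from MapFile.Writer
+
+        SetFile.Reader reader = new SetFile.Reader(fs, "/tmp/demo.set", conf);
+        boolean found = reader.seek(new Text("banana"));   // true
+ -->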
+ <!-- start class org.apache.hadoop.io.SortedMapWritable -->
+ <class name="SortedMapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.SortedMap"/>
+ <constructor name="SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor.]]>
+ </doc>
+ </constructor>
+ <constructor name="SortedMapWritable" type="org.apache.hadoop.io.SortedMapWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Copy constructor.
+
+ @param other the map to copy from]]>
+ </doc>
+ </constructor>
+ <method name="comparator" return="java.util.Comparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="firstKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="headMap" return="java.util.SortedMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="lastKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="subMap" return="java.util.SortedMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="tailMap" return="java.util.SortedMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="containsValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="entrySet" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="isEmpty" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="keySet" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="put" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="putAll"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.util.Map"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="remove" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="values" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Writable SortedMap.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.SortedMapWritable -->
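+ <!-- A brief sketch of SortedMapWritable acting as both a SortedMap and a
+      Writable; out is assumed to be an existing java.io.DataOutput:
+
+        SortedMapWritable m = new SortedMapWritable();
+        m.put(new Text("b"), new VIntWritable(2));
+        m.put(new Text("a"), new VIntWritable(1));
+        WritableComparable first = m.firstKey();   // Text "a": keys stay sorted
+        m.write(out);                              // serialized like any Writable
+ -->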
+ <!-- start interface org.apache.hadoop.io.Stringifier -->
+ <interface name="Stringifier" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Converts the object to a string representation
+ @param obj the object to convert
+ @return the string representation of the object
+ @throws IOException if the object cannot be converted]]>
+ </doc>
+ </method>
+ <method name="fromString" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Restores the object from its string representation.
+ @param str the string representation of the object
+ @return restored object
+ @throws IOException if the object cannot be restored]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes this object.
+ @throws IOException if an I/O error occurs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The Stringifier interface offers two methods: one converts an object
+ to a string representation, and the other restores the object from its
+ string representation.
+ @param <T> the class of the objects to stringify]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Stringifier -->
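+ <!-- An interface-level sketch of the round trip the Stringifier contract
+      implies; stringifier stands for any concrete implementation:
+
+        Stringifier<Text> stringifier = ...;       // some implementation
+        String repr = stringifier.toString(new Text("hello"));
+        Text restored = stringifier.fromString(repr);
+        stringifier.close();
+ -->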
+ <!-- start class org.apache.hadoop.io.Text -->
+ <class name="Text" extends="org.apache.hadoop.io.BinaryComparable"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Text" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a string.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="org.apache.hadoop.io.Text"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another text.]]>
+ </doc>
+ </constructor>
+ <constructor name="Text" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a byte array.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the raw bytes; however, only data up to {@link #getLength()} is
+ valid.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of bytes in the byte array]]>
+ </doc>
+ </method>
+ <method name="charAt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="position" type="int"/>
+ <doc>
+ <![CDATA[Returns the Unicode Scalar Value (32-bit integer value)
+ for the character at <code>position</code>. Note that this
+ method avoids using the converter or doing String instantiation.
+ @return the Unicode scalar value at position or -1
+ if the position is invalid or points to a
+ trailing byte]]>
+ </doc>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ </method>
+ <method name="find" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="what" type="java.lang.String"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Finds any occurrence of <code>what</code> in the backing
+ buffer, starting at position <code>start</code>. The starting
+ position is measured in bytes and the return value is in
+ terms of byte position in the buffer. The backing buffer is
+ not converted to a string for this operation.
+ @return byte position of the first occurrence of the search
+ string in the UTF-8 buffer or -1 if not found]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <doc>
+ <![CDATA[Set to a UTF-8 byte array.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[Copy a text.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Set the Text to a range of bytes.
+ @param utf8 the data to copy from
+ @param start the first position of the new string
+ @param len the number of bytes of the new string]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Append a range of bytes to the end of the given text
+ @param utf8 the data to copy from
+ @param start the first position to append from utf8
+ @param len the number of bytes to append]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clear the string to empty.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert text back to a string.
+ @see java.lang.Object#toString()]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one Text in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize: write this object to <code>out</code>.
+ The length uses zero-compressed encoding.
+ @see Writable#write(DataOutput)]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a Text with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If the input is malformed,
+ it is replaced by a default value.]]>
+ </doc>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ </method>
+ <method name="decode" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If the input is malformed,
+ invalid chars are replaced by a default value.
+ @return ByteBuffer: bytes stored at ByteBuffer.array()
+ and length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="encode" return="java.nio.ByteBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <param name="replace" type="boolean"/>
+ <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+ <doc>
+ <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.
+ @return ByteBuffer: bytes stored at ByteBuffer.array()
+ and length is ByteBuffer.limit()]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF8 encoded string from in]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF8 encoded string to out]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check if a byte array contains valid utf-8
+ @param utf8 byte array
+ @throws MalformedInputException if the byte array contains invalid utf-8]]>
+ </doc>
+ </method>
+ <method name="validateUTF8"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf8" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+ <doc>
+ <![CDATA[Check to see if a byte array is valid utf-8
+ @param utf8 the array of bytes
+ @param start the offset of the first byte in the array
+ @param len the length of the byte sequence
+ @throws MalformedInputException if the byte array contains invalid bytes]]>
+ </doc>
+ </method>
+ <method name="bytesToCodePoint" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="java.nio.ByteBuffer"/>
+ <doc>
+ <![CDATA[Returns the next code point at the current position in
+ the buffer. The buffer's position will be incremented.
+ Any mark set on this buffer will be changed by this method!]]>
+ </doc>
+ </method>
+ <method name="utf8Length" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[For the given string, returns the number of UTF-8 bytes
+ required to encode the string.
+ @param string text to encode
+ @return number of UTF-8 bytes required to encode]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class stores text using standard UTF8 encoding. It provides methods
+ to serialize, deserialize, and compare texts at byte level. The type of
+ length is integer and is serialized using zero-compressed format. <p>In
+ addition, it provides methods for string traversal without converting the
+ byte array to a string. <p>Also includes utilities for
+ serializing/deserializing a string, coding/decoding a string, checking if a
+ byte array contains valid UTF8 code, and calculating the length of an encoded
+ string.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text -->
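+ <!-- A short sketch of the byte-level operations described above; positions
+      and lengths are in UTF-8 bytes, not characters:
+
+        Text t = new Text("hadoop");
+        int bytes = t.getLength();      // 6 UTF-8 bytes
+        int at = t.find("doo");         // byte offset 2, no String created
+        int cp = t.charAt(0);           // Unicode scalar value of 'h'
+        t.set("spark");                 // reuses the existing object
+        String s = t.toString();        // converts back to a String
+ -->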
+ <!-- start class org.apache.hadoop.io.Text.Comparator -->
+ <class name="Text.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Text.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for Text keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.Text.Comparator -->
+ <!-- start class org.apache.hadoop.io.TwoDArrayWritable -->
+ <class name="TwoDArrayWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TwoDArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[][]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toArray" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="values" type="org.apache.hadoop.io.Writable[][]"/>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable[][]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A Writable for 2D arrays containing a matrix of instances of a class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.TwoDArrayWritable -->
+ <!-- start class org.apache.hadoop.io.UTF8 -->
+ <class name="UTF8" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="replaced by Text">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="UTF8" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from a given string.]]>
+ </doc>
+ </constructor>
+ <constructor name="UTF8" type="org.apache.hadoop.io.UTF8"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct from another UTF8.]]>
+ </doc>
+ </constructor>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The raw bytes.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the encoded string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set to contain the contents of a string.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.io.UTF8"/>
+ <doc>
+ <![CDATA[Set to contain the contents of another UTF8.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skips over one UTF8 in the input.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compare two UTF8s.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert to a String.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a UTF8 with the same contents.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convert a string to a UTF-8 encoded byte array.
+ @see String#getBytes(String)]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string.
+
+ @see DataInput#readUTF()]]>
+ </doc>
+ </method>
+ <method name="writeString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a UTF-8 encoded string.
+
+ @see DataOutput#writeUTF(String)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for strings that uses the UTF8 encoding.
+
+ <p>Also includes utilities for efficiently reading and writing UTF-8.
+
+ @deprecated replaced by Text]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8 -->
+ <!-- start class org.apache.hadoop.io.UTF8.Comparator -->
+ <class name="UTF8.Comparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UTF8.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.UTF8.Comparator -->
+ <!-- start class org.apache.hadoop.io.VersionedWritable -->
+ <class name="VersionedWritable" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="VersionedWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="byte"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the version number of the current implementation.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for Writables that provides version checking.
+
+ <p>This is useful when a class may evolve, so that instances written by the
+ old version of the class may still be processed by the new version. To
+ handle this situation, {@link #readFields(DataInput)}
+ implementations should catch {@link VersionMismatchException}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionedWritable -->
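+ <!-- A sketch of the version-checking pattern the class doc describes; the
+      subclass name and its field are hypothetical:
+
+        public class MyRecord extends VersionedWritable {
+          private static final byte VERSION = 1;
+          private int count;
+
+          public byte getVersion() { return VERSION; }
+
+          public void write(DataOutput out) throws IOException {
+            super.write(out);           // writes the version byte first
+            out.writeInt(count);
+          }
+
+          public void readFields(DataInput in) throws IOException {
+            super.readFields(in);       // throws VersionMismatchException
+            count = in.readInt();       // if the stored version differs
+          }
+        }
+ -->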
+ <!-- start class org.apache.hadoop.io.VersionMismatchException -->
+ <class name="VersionMismatchException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionMismatchException" type="byte, byte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string representation of this object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Thrown by {@link VersionedWritable#readFields(DataInput)} when the
+ version of an object being read does not match the current implementation
+ version as returned by {@link VersionedWritable#getVersion()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VersionMismatchException -->
+ <!-- start class org.apache.hadoop.io.VIntWritable -->
+ <class name="VIntWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VIntWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VIntWritable" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="int"/>
+ <doc>
+ <![CDATA[Set the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this VIntWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VIntWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VIntWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for integer values stored in variable-length format.
+ Such values take between one and five bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVInt(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VIntWritable -->
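+ <!-- A sketch of the variable-length encoding trade-off noted above; out is
+      assumed to be an existing java.io.DataOutput:
+
+        VIntWritable v = new VIntWritable(42);
+        v.write(out);                   // small values fit in a single byte
+        v.set(1000000);
+        v.write(out);                   // larger values take up to five bytes
+ -->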
+ <!-- start class org.apache.hadoop.io.VLongWritable -->
+ <class name="VLongWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="VLongWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="VLongWritable" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="long"/>
+ <doc>
+ <![CDATA[Set the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the value of this VLongWritable.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns true iff <code>o</code> is a VLongWritable with the same value.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Compares two VLongWritables.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A WritableComparable for longs in a variable-length format. Such values take
+ between one and nine bytes. Smaller values take fewer bytes.
+
+ @see org.apache.hadoop.io.WritableUtils#readVLong(DataInput)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.VLongWritable -->
+ <!-- start interface org.apache.hadoop.io.Writable -->
+ <interface name="Writable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the fields of this object to <code>out</code>.
+
+ @param out <code>DataOutput</code> to serialize this object into.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the fields of this object from <code>in</code>.
+
+ <p>For efficiency, implementations should attempt to re-use storage in the
+ existing object where possible.</p>
+
+ @param in <code>DataInput</code> to deserialize this object from.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A serializable object which implements a simple, efficient, serialization
+ protocol, based on {@link DataInput} and {@link DataOutput}.
+
+ <p>Any <code>key</code> or <code>value</code> type in the Hadoop Map-Reduce
+ framework implements this interface.</p>
+
+ <p>Implementations typically implement a static <code>read(DataInput)</code>
+ method which constructs a new instance, calls {@link #readFields(DataInput)}
+ and returns the instance.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritable implements Writable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public static MyWritable read(DataInput in) throws IOException {
+ MyWritable w = new MyWritable();
+ w.readFields(in);
+ return w;
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.Writable -->
+ <!-- start interface org.apache.hadoop.io.WritableComparable -->
+ <interface name="WritableComparable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Comparable"/>
+ <doc>
+ <![CDATA[A {@link Writable} which is also {@link Comparable}.
+
+ <p><code>WritableComparable</code>s can be compared to each other, typically
+ via <code>Comparator</code>s. Any type which is to be used as a
+ <code>key</code> in the Hadoop Map-Reduce framework should implement this
+ interface.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyWritableComparable implements WritableComparable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public int compareTo(MyWritableComparable w) {
+ int thisValue = this.counter;
+ int thatValue = w.counter;
+ return (thisValue &lt; thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
+ }
+ }
+ </pre></blockquote></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableComparable -->
+ <!-- start class org.apache.hadoop.io.WritableComparator -->
+ <class name="WritableComparator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator"/>
+ <constructor name="WritableComparator" type="java.lang.Class"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </constructor>
+ <constructor name="WritableComparator" type="java.lang.Class, boolean"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="get" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get a comparator for a {@link WritableComparable} implementation.]]>
+ </doc>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link WritableComparable}
+ implementation.]]>
+ </doc>
+ </method>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the WritableComparable implementation class.]]>
+ </doc>
+ </method>
+ <method name="newKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a new {@link WritableComparable} instance.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Optimization hook. Override this to make SequenceFile.Sorters scream.
+
+ <p>The default implementation reads the data into two {@link
+ WritableComparable}s (using {@link
+ Writable#readFields(DataInput)}), then calls {@link
+ #compare(WritableComparable,WritableComparable)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+ <doc>
+ <![CDATA[Compare two WritableComparables.
+
+ <p> The default implementation uses the natural ordering, calling {@link
+ Comparable#compareTo(Object)}.]]>
+ </doc>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <method name="hashBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Compute hash for binary data.]]>
+ </doc>
+ </method>
+ <method name="readUnsignedShort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an unsigned short from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse an integer from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a long from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array containing the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A Comparator for {@link WritableComparable}s.
+
+ <p>This base implementation uses the natural ordering. To define alternate
+ orderings, override {@link #compare(WritableComparable,WritableComparable)}.
+
+ <p>One may optimize compare-intensive operations by overriding
+ {@link #compare(byte[],int,int,byte[],int,int)}. Static utility methods are
+ provided to assist in optimized implementations of this method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableComparator -->
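+ <!-- A sketch of the byte-level optimization hook described above, using the
+      static helper readInt(byte[],int); the key class and its layout (an int
+      in the first four bytes) are hypothetical:
+
+        public static class MyComparator extends WritableComparator {
+          public MyComparator() { super(MyWritableComparable.class); }
+
+          public int compare(byte[] b1, int s1, int l1,
+                             byte[] b2, int s2, int l2) {
+            int i1 = readInt(b1, s1);   // compare without deserializing
+            int i2 = readInt(b2, s2);
+            return (i1 < i2) ? -1 : ((i1 == i2) ? 0 : 1);
+          }
+        }
+
+        // register it so it is used wherever the key class is compared:
+        WritableComparator.define(MyWritableComparable.class, new MyComparator());
+ -->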
+ <!-- start class org.apache.hadoop.io.WritableFactories -->
+ <class name="WritableFactories" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="factory" type="org.apache.hadoop.io.WritableFactory"/>
+ <doc>
+ <![CDATA[Define a factory for a class.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.io.WritableFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the factory defined for a class.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Create a new instance of a class with a defined factory.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factories for non-public writables. Defining a factory permits {@link
+ ObjectWritable} to construct instances of non-public classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableFactories -->
+ <!-- start interface org.apache.hadoop.io.WritableFactory -->
+ <interface name="WritableFactory" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="newInstance" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a new instance.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A factory for a class of Writable.
+ @see WritableFactories]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.WritableFactory -->
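Taken together, WritableFactories and WritableFactory let ObjectWritable construct classes it cannot instantiate reflectively. A hedged sketch, where NonPublicRecord stands in for some package-private Writable (the name is an assumption, not a real class):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Writable;
    import org.apache.hadoop.io.WritableFactories;
    import org.apache.hadoop.io.WritableFactory;

    // NonPublicRecord is hypothetical: a Writable whose class or constructor
    // is not public, so it cannot be created via newInstance() reflection.
    WritableFactories.setFactory(NonPublicRecord.class, new WritableFactory() {
      public Writable newInstance() {
        return new NonPublicRecord();
      }
    });
    // Later, e.g. while deserializing an ObjectWritable payload:
    Writable w = WritableFactories.newInstance(NonPublicRecord.class, new Configuration());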
+ <!-- start class org.apache.hadoop.io.WritableName -->
+ <class name="WritableName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the name that a class should be known as to something other than the
+ class name.]]>
+ </doc>
+ </method>
+ <method name="addName"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add an alternate name for a class.]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writableClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Return the name for a class. Default is {@link Class#getName()}.]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the class for a name. Default is {@link Class#forName(String)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Utility to permit renaming of Writable implementation classes without
+ invalidating files that contain their class name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableName -->
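For example, after moving a Writable implementation to a new package, the old name recorded in existing files can be kept resolvable. A sketch, where the com.example class names are assumptions:

    import org.apache.hadoop.io.WritableName;

    // Hypothetical rename: RecordWritable moved from com.example.old to
    // com.example.current. addName() keeps the old serialized class name
    // readable; newly written files continue to use Class#getName().
    WritableName.addName(com.example.current.RecordWritable.class,
                         "com.example.old.RecordWritable");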
+ <!-- start class org.apache.hadoop.io.WritableUtils -->
+ <class name="WritableUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="WritableUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readCompressedByteArray" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skipCompressedByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedByteArray" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="bytes" type="byte[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedString" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedStringArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="s" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readCompressedStringArray" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="displayByteArray"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="record" type="byte[]"/>
+ </method>
+ <method name="clone" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="orig" type="org.apache.hadoop.io.Writable"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Make a copy of a writable object using serialization to a buffer.
+ @param orig The object to copy
+ @return The copied object]]>
+ </doc>
+ </method>
+ <method name="cloneInto"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="use ReflectionUtils.cloneInto instead.">
+ <param name="dst" type="org.apache.hadoop.io.Writable"/>
+ <param name="src" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Make a copy of the writable object using serialization to a buffer.
+ @param dst the object to copy into, whose current contents are destroyed
+ @param src the object to copy from
+ @throws IOException
+ @deprecated use ReflectionUtils.cloneInto instead.]]>
+ </doc>
+ </method>
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Serializes an integer to a binary stream with zero-compressed encoding,
+ using the same format as {@link #writeVLong(DataOutput, long)}.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ integer is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following integer
+ is positive, and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following integer
+ is negative, and the number of bytes that follow is -(v+120). Bytes are
+ stored in the high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Integer to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used with the actual value.
+ For other values of i, the first byte value indicates whether the
+ long is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following long
+ is positive, and the number of bytes that follow is -(v+112).
+ If the first byte value v is between -121 and -128, the following long
+ is negative, and the number of bytes that follow is -(v+120). Bytes are
+ stored in the high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized long from stream.]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized integer from stream.]]>
+ </doc>
+ </method>
+ <method name="isNegativeVInt" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+      <![CDATA[Given the first byte of a vint/vlong, determine the sign.
+ @param value the first byte
+ @return true if the encoded value is negative]]>
+ </doc>
+ </method>
+ <method name="decodeVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte"/>
+ <doc>
+ <![CDATA[Parse the first byte of a vint/vlong to determine the number of bytes
+ @param value the first byte of the vint/vlong
+ @return the total number of bytes (1 to 9)]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+      <![CDATA[Get the encoded length of an integer stored in variable-length format.
+ @return the encoded length]]>
+ </doc>
+ </method>
+ <method name="readEnum" return="java.lang.Enum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="enumType" type="java.lang.Class"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read an Enum value from DataInput. Enums are read and written
+ using their String values.
+ @param <T> Enum type
+ @param in DataInput to read from
+ @param enumType Class type of Enum
+ @return Enum represented by String read from DataInput
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="writeEnum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <param name="enumVal" type="java.lang.Enum"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Writes the String value of an enum to DataOutput.
+ @param out DataOutput stream
+ @param enumVal enum value
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="skipFully"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Skip <i>len</i> bytes in input stream <i>in</i>.
+ @param in input stream
+ @param len number of bytes to skip
+ @throws IOException when fewer than <i>len</i> bytes could be skipped]]>
+ </doc>
+ </method>
+ <method name="toByteArray" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="writables" type="org.apache.hadoop.io.Writable[]"/>
+ <doc>
+ <![CDATA[Convert writables to a byte array]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.WritableUtils -->
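A small round-trip through the zero-compressed encoding described above. For i = 300 (two non-zero bytes), the encoding should take one marker byte plus two data bytes; the exact byte values in the comments follow from the rules in the writeVLong doc:

    import java.io.*;
    import org.apache.hadoop.io.WritableUtils;

    public class VIntRoundTrip {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        WritableUtils.writeVInt(out, 300);   // marker byte -114, then 0x01 0x2C
        out.flush();
        // getVIntSize() predicts the encoded length without writing anything.
        System.out.println(bytes.size());                   // 3
        System.out.println(WritableUtils.getVIntSize(300)); // 3
        DataInputStream in =
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(WritableUtils.readVInt(in));     // 300
      }
    }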
+</package>
+<package name="org.apache.hadoop.io.compress">
+ <!-- start class org.apache.hadoop.io.compress.BlockCompressorStream -->
+ <class name="BlockCompressorStream" extends="org.apache.hadoop.io.compress.CompressorStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockCompressorStream" type="java.io.OutputStream, org.apache.hadoop.io.compress.Compressor, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a {@link BlockCompressorStream}.
+
+ @param out stream
+ @param compressor compressor to be used
+ @param bufferSize size of buffer
+ @param compressionOverhead maximum 'overhead' of the compression
+ algorithm with given bufferSize]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockCompressorStream" type="java.io.OutputStream, org.apache.hadoop.io.compress.Compressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a {@link BlockCompressorStream} with given output-stream and
+ compressor.
+ Use default of 512 as bufferSize and compressionOverhead of
+ (1% of bufferSize + 12 bytes) = 18 bytes (zlib algorithm).
+
+ @param out stream
+ @param compressor compressor to be used]]>
+ </doc>
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the data provided to the compression codec, compressing no more
+ than the buffer size less the compression overhead as specified during
+ construction for each block.
+
+ Each block contains the uncompressed length for the block, followed by
+ one or more length-prefixed blocks of compressed data.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link org.apache.hadoop.io.compress.CompressorStream} which works
+ with 'block-based' compression algorithms, as opposed to
+ 'stream-based' compression algorithms.
+
+ It should be noted that this wrapper does not guarantee that blocks will
+ be sized for the compressor. If the
+ {@link org.apache.hadoop.io.compress.Compressor} requires buffering to
+ effect meaningful compression, it is responsible for it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.BlockCompressorStream -->
+ <!-- start class org.apache.hadoop.io.compress.BlockDecompressorStream -->
+ <class name="BlockDecompressorStream" extends="org.apache.hadoop.io.compress.DecompressorStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BlockDecompressorStream" type="java.io.InputStream, org.apache.hadoop.io.compress.Decompressor, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a {@link BlockDecompressorStream}.
+
+ @param in input stream
+ @param decompressor decompressor to use
+ @param bufferSize size of buffer]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockDecompressorStream" type="java.io.InputStream, org.apache.hadoop.io.compress.Decompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a {@link BlockDecompressorStream}.
+
+ @param in input stream
+ @param decompressor decompressor to use]]>
+ </doc>
+ </constructor>
+ <constructor name="BlockDecompressorStream" type="java.io.InputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressedData"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link org.apache.hadoop.io.compress.DecompressorStream} which works
+ with 'block-based' compression algorithms, as opposed to
+ 'stream-based' compression algorithms.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.BlockDecompressorStream -->
+ <!-- start class org.apache.hadoop.io.compress.BZip2Codec -->
+ <class name="BZip2Codec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="BZip2Codec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BZip2Codec]]>
+ </doc>
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Creates CompressionOutputStream for BZip2
+
+ @param out
+ The output Stream
+ @return The BZip2 CompressionOutputStream
+ @throws java.io.IOException
+ Throws IO exception]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Creates a CompressionInputStream from which uncompressed data can be read.
+
+ @param in
+ The InputStream
+ @return Returns CompressionInputStream for BZip2
+ @throws java.io.IOException
+ Throws IOException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This functionality is currently not supported.
+
+ @throws java.lang.UnsupportedOperationException
+ Throws UnsupportedOperationException]]>
+ </doc>
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[.bz2 is recognized as the default extension for compressed BZip2 files.
+
+ @return the default BZip2 file extension as a String]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[This class provides CompressionOutputStream and CompressionInputStream for
+ compression and decompression. There is currently no implementation of
+ the Compressor and Decompressor interfaces, so those methods of
+ CompressionCodec that take a Compressor or Decompressor argument throw
+ UnsupportedOperationException.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.BZip2Codec -->
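Since only the one-argument stream factories are implemented in this version, a usage sketch sticks to those (the literal string and buffer size are illustrative):

    import java.io.*;
    import org.apache.hadoop.io.compress.*;

    public class BZip2RoundTrip {
      public static void main(String[] args) throws IOException {
        BZip2Codec codec = new BZip2Codec();
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        CompressionOutputStream cout = codec.createOutputStream(buf);
        cout.write("hello bzip2".getBytes("UTF-8"));
        cout.close();

        CompressionInputStream cin =
            codec.createInputStream(new ByteArrayInputStream(buf.toByteArray()));
        byte[] back = new byte[64];
        int n = cin.read(back, 0, back.length);  // a loop is needed in general
        System.out.println(new String(back, 0, n, "UTF-8"));
      }
    }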
+ <!-- start class org.apache.hadoop.io.compress.CodecPool -->
+ <class name="CodecPool" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CodecPool"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Compressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Compressor</code>
+ @return <code>Compressor</code> for the given
+ <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="getDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <doc>
+ <![CDATA[Get a {@link Decompressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the
+ <code>Decompressor</code>
+ @return <code>Decompressor</code> for the given
+ <code>CompressionCodec</code> from the pool or a new one]]>
+ </doc>
+ </method>
+ <method name="returnCompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <doc>
+ <![CDATA[Return the {@link Compressor} to the pool.
+
+ @param compressor the <code>Compressor</code> to be returned to the pool]]>
+ </doc>
+ </method>
+ <method name="returnDecompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <doc>
+ <![CDATA[Return the {@link Decompressor} to the pool.
+
+ @param decompressor the <code>Decompressor</code> to be returned to the
+ pool]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A global compressor/decompressor pool used to save and reuse
+ (possibly native) compression/decompression codecs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CodecPool -->
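The intended borrow/return discipline, sketched as a helper; writeCompressed and its parameters are assumptions, not part of the API:

    import java.io.IOException;
    import java.io.OutputStream;
    import org.apache.hadoop.io.compress.*;

    static void writeCompressed(CompressionCodec codec, OutputStream raw, byte[] data)
        throws IOException {
      Compressor compressor = CodecPool.getCompressor(codec);
      try {
        CompressionOutputStream out = codec.createOutputStream(raw, compressor);
        out.write(data, 0, data.length);
        out.finish();                            // flush without closing 'raw'
      } finally {
        CodecPool.returnCompressor(compressor);  // always hand it back to the pool
      }
    }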
+ <!-- start interface org.apache.hadoop.io.compress.CompressionCodec -->
+ <interface name="CompressionCodec" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream}.
+
+ @param out the location for the final output stream
+ @return a stream to which the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream} with the given {@link Compressor}.
+
+ @param out the location for the final output stream
+ @param compressor compressor to use
+ @return a stream to which the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
+
+ @return the type of compressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Compressor} for use by this {@link CompressionCodec}.
+
+ @return a new compressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a stream decompressor that will read from the given input stream.
+
+ @param in the stream to read compressed bytes from
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a {@link CompressionInputStream} that will read from the given
+ {@link InputStream} with the given {@link Decompressor}.
+
+ @param in the stream to read compressed bytes from
+ @param decompressor decompressor to use
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
+
+ @return the type of decompressor needed by this codec.]]>
+ </doc>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
+
+ @return a new decompressor for use by this codec]]>
+ </doc>
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a streaming compression/decompression pair.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.CompressionCodec -->
+ <!-- start class org.apache.hadoop.io.compress.CompressionCodecFactory -->
+ <class name="CompressionCodecFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionCodecFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Find the codecs specified in the config value io.compression.codecs
+ and register them. Defaults to gzip and zip.]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Print the extension map out as a string.]]>
+ </doc>
+ </method>
+ <method name="getCodecClasses" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Get the list of codecs listed in the configuration.
+ @param conf the configuration to look in
+ @return a list of the codec classes, or null if the attribute
+ was not set]]>
+ </doc>
+ </method>
+ <method name="setCodecClasses"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="classes" type="java.util.List"/>
+ <doc>
+ <![CDATA[Sets a list of codec classes in the configuration.
+ @param conf the configuration to modify
+ @param classes the list of classes to set]]>
+ </doc>
+ </method>
+ <method name="getCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Find the relevant compression codec for the given file based on its
+ filename suffix.
+ @param file the filename to check
+ @return the codec object]]>
+ </doc>
+ </method>
+ <method name="removeSuffix" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filename" type="java.lang.String"/>
+ <param name="suffix" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes a suffix from a filename, if it has it.
+ @param filename the filename to strip
+ @param suffix the suffix to remove
+ @return the shortened filename]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[A little test program.
+ @param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A factory that will find the correct codec for a given filename.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionCodecFactory -->
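Typical lookup by filename suffix; the path is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.compress.*;

    public class CodecLookup {
      public static void main(String[] args) {
        CompressionCodecFactory factory =
            new CompressionCodecFactory(new Configuration());
        // ".gz" is registered by default, so this resolves to the gzip codec.
        CompressionCodec codec = factory.getCodec(new Path("logs/app.log.gz"));
        if (codec != null) {
          String base = CompressionCodecFactory.removeSuffix(
              "logs/app.log.gz", codec.getDefaultExtension());
          System.out.println(base);  // logs/app.log
        }
      }
    }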
+ <!-- start class org.apache.hadoop.io.compress.CompressionInputStream -->
+ <class name="CompressionInputStream" extends="java.io.InputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Create a compression input stream that reads
+ compressed bytes from the given stream and yields the decompressed bytes.
+
+ @param in The input stream to read compressed bytes from.]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read bytes from the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the decompressor to its initial state and discard any buffered data,
+ as the underlying stream may have been repositioned.]]>
+ </doc>
+ </method>
+ <field name="in" type="java.io.InputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[The compressed input stream being read from.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression input stream.
+
+ <p>Implementations are assumed to be buffered. This permits clients to
+ reposition the underlying input stream then call {@link #resetState()},
+ without having to also synchronize client buffers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <class name="CompressionOutputStream" extends="java.io.OutputStream"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressionOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Create a compression output stream that writes
+ the compressed bytes to the given stream.
+ @param out the stream to write compressed bytes to]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write compressed bytes to the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Finishes writing compressed data to the output stream
+ without closing the underlying stream.]]>
+ </doc>
+ </method>
+ <method name="resetState"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reset the compression to the initial state.
+ Does not reset the underlying stream.]]>
+ </doc>
+ </method>
+ <field name="out" type="java.io.OutputStream"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The output stream to be compressed.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A compression output stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressionOutputStream -->
+ <!-- start interface org.apache.hadoop.io.compress.Compressor -->
+ <interface name="Compressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for compression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets preset dictionary for compression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of uncompressed bytes input so far.]]>
+ </doc>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return number of compressed bytes output so far.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[When called, indicates that compression should end
+ with the current contents of the input buffer.]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the end of the compressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with compressed data. Returns actual number
+ of bytes of compressed data. A return value of 0 indicates that
+ needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the compressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of compressed data.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets compressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the compressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Specification of a stream-based 'compressor' which can be
+ plugged into a {@link CompressionOutputStream} to compress data.
+ This is modelled after {@link java.util.zip.Deflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Compressor -->
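The interface implies the same drive loop as java.util.zip.Deflater. A sketch, assuming the compressor came from some codec's createCompressor() and all input fits in one setInput() call; compressAll is an assumed helper name:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.compress.Compressor;

    static byte[] compressAll(Compressor compressor, byte[] input) throws IOException {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      byte[] buf = new byte[64 * 1024];
      compressor.setInput(input, 0, input.length);
      compressor.finish();                     // no further input will arrive
      while (!compressor.finished()) {
        int n = compressor.compress(buf, 0, buf.length);
        out.write(buf, 0, n);                  // n == 0 would mean: check needsInput()
      }
      return out.toByteArray();
    }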
+ <!-- start class org.apache.hadoop.io.compress.CompressorStream -->
+ <class name="CompressorStream" extends="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompressorStream" type="java.io.OutputStream, org.apache.hadoop.io.compress.Compressor, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CompressorStream" type="java.io.OutputStream, org.apache.hadoop.io.compress.Compressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CompressorStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow derived classes to directly set the underlying stream.
+
+ @param out Underlying output stream.]]>
+ </doc>
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="compressor" type="org.apache.hadoop.io.compress.Compressor"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="buffer" type="byte[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="closed" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.CompressorStream -->
+ <!-- start interface org.apache.hadoop.io.compress.Decompressor -->
+ <interface name="Decompressor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+ <![CDATA[Sets input data for decompression.
+ This should be called whenever #needsInput() returns
+ <code>true</code> indicating that more input data is required.
+
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the input data buffer is empty and
+ #setInput() should be called to provide more input.
+
+ @return <code>true</code> if the input data buffer is empty and
+ #setInput() should be called in order to provide more input.]]>
+ </doc>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <doc>
+      <![CDATA[Sets preset dictionary for decompression. A preset dictionary
+ is used when the history buffer can be predetermined.
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+ </doc>
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns <code>true</code> if a preset dictionary is needed for decompression.
+ @return <code>true</code> if a preset dictionary is needed for decompression]]>
+ </doc>
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns true if the end of the decompressed
+ data output stream has been reached.
+ @return <code>true</code> if the end of the decompressed
+ data output stream has been reached.]]>
+ </doc>
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fills specified buffer with uncompressed data. Returns actual number
+ of bytes of uncompressed data. A return value of 0 indicates that
+ #needsInput() should be called in order to determine if more input
+ data is required.
+
+ @param b Buffer for the uncompressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of uncompressed data.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resets decompressor so that a new set of input data can be processed.]]>
+ </doc>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Closes the decompressor and discards any unprocessed input.]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Specification of a stream-based 'de-compressor' which can be
+ plugged into a {@link CompressionInputStream} to decompress data.
+ This is modelled after {@link java.util.zip.Inflater}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.Decompressor -->
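The mirror-image loop for decompression, under the same single-setInput() assumption; decompressAll is likewise an assumed helper name:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.compress.Decompressor;

    static byte[] decompressAll(Decompressor decompressor, byte[] compressed)
        throws IOException {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      byte[] buf = new byte[64 * 1024];
      decompressor.setInput(compressed, 0, compressed.length);
      while (!decompressor.finished()) {
        int n = decompressor.decompress(buf, 0, buf.length);
        if (n == 0 && decompressor.needsInput()) {
          break;                               // input exhausted before finish marker
        }
        out.write(buf, 0, n);
      }
      return out.toByteArray();
    }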
+ <!-- start class org.apache.hadoop.io.compress.DecompressorStream -->
+ <class name="DecompressorStream" extends="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DecompressorStream" type="java.io.InputStream, org.apache.hadoop.io.compress.Decompressor, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DecompressorStream" type="java.io.InputStream, org.apache.hadoop.io.compress.Decompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="DecompressorStream" type="java.io.InputStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow derived classes to directly set the underlying stream.
+
+ @param in Underlying input stream.]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressedData"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="markSupported" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="mark"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readlimit" type="int"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="buffer" type="byte[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="eof" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="closed" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.DecompressorStream -->
+ <!-- start class org.apache.hadoop.io.compress.DefaultCodec -->
+ <class name="DefaultCodec" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+ <constructor name="DefaultCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.DefaultCodec -->
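+ <!-- A minimal round-trip sketch for the CompressionCodec API above. This is
+ illustrative and not part of the generated listing; it assumes a Hadoop 0.20
+ classpath, and the class and variable names are hypothetical.
+
+ import java.io.*;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.io.compress.*;
+
+ public class DefaultCodecRoundTrip {
+   public static void main(String[] args) throws IOException {
+     DefaultCodec codec = new DefaultCodec();
+     codec.setConf(new Configuration()); // Configurable: set a conf before use
+
+     byte[] original = "hello, codec".getBytes("UTF-8");
+
+     // Compress: wrap any OutputStream in a CompressionOutputStream.
+     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+     CompressionOutputStream out = codec.createOutputStream(bytes);
+     out.write(original);
+     out.finish(); // flush any buffered compressed data
+     out.close();
+
+     // Decompress: wrap the compressed bytes in a CompressionInputStream.
+     CompressionInputStream in =
+         codec.createInputStream(new ByteArrayInputStream(bytes.toByteArray()));
+     byte[] buf = new byte[1024];
+     int n = in.read(buf);
+     in.close();
+     System.out.println(new String(buf, 0, n, "UTF-8"));
+   }
+ }
+ -->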
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec -->
+ <class name="GzipCodec" extends="org.apache.hadoop.io.compress.DefaultCodec"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDecompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDefaultExtension" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class creates gzip compressors/decompressors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec -->
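+ <!-- A short sketch of writing a gzip file through GzipCodec; the output should
+ be readable by standard gunzip. Illustrative only: the file name is made up,
+ and getDefaultExtension() is assumed to return ".gz" for this codec.
+
+ import java.io.*;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.io.compress.*;
+
+ public class GzipWrite {
+   public static void main(String[] args) throws IOException {
+     GzipCodec codec = new GzipCodec();
+     codec.setConf(new Configuration());
+     OutputStream file =
+         new FileOutputStream("data" + codec.getDefaultExtension()); // "data.gz"
+     CompressionOutputStream out = codec.createOutputStream(file);
+     out.write("gzip me\n".getBytes("UTF-8"));
+     out.close(); // writes the gzip trailer and closes the underlying file
+   }
+ }
+ -->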
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <class name="GzipCodec.GzipInputStream" extends="org.apache.hadoop.io.compress.DecompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipInputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipInputStream" type="org.apache.hadoop.io.compress.DecompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow subclasses to directly set the inflater stream.]]>
+ </doc>
+ </constructor>
+ <method name="available" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="skip" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="offset" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream -->
+ <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+ <class name="GzipCodec.GzipOutputStream" extends="org.apache.hadoop.io.compress.CompressorStream"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="GzipCodec.GzipOutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="GzipCodec.GzipOutputStream" type="org.apache.hadoop.io.compress.CompressorStream"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Allow child classes to supply a different stream type here.
+ @param out the Deflater stream to use]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="resetState"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A bridge that wraps around a DeflaterOutputStream to make it
+ a CompressionOutputStream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream -->
+</package>
+<package name="org.apache.hadoop.io.compress.bzip2">
+ <!-- start interface org.apache.hadoop.io.compress.bzip2.BZip2Constants -->
+ <interface name="BZip2Constants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="baseBlockSize" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_ALPHA_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_CODE_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNA" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNB" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="N_GROUPS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="G_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="N_ITERS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAX_SELECTORS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NUM_OVERSHOOT_BYTES" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rNums" type="int[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This array really shouldn't be here. Again, for historical purposes it
+ is.
+
+ <p>
+ FIXME: This array should be in a private or package private location,
+ since it could be modified by malicious code.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Base class for both the compress and decompress classes. Holds common
+ arrays and static data.
+ <p>
+ This interface is public for historical purposes. You should have no need to
+ use it.
+ </p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.compress.bzip2.BZip2Constants -->
+ <!-- start class org.apache.hadoop.io.compress.bzip2.BZip2DummyCompressor -->
+ <class name="BZip2DummyCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="BZip2DummyCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[This is a dummy compressor for BZip2.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.bzip2.BZip2DummyCompressor -->
+ <!-- start class org.apache.hadoop.io.compress.bzip2.BZip2DummyDecompressor -->
+ <class name="BZip2DummyDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="BZip2DummyDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[This is a dummy decompressor for BZip2.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.bzip2.BZip2DummyDecompressor -->
+ <!-- start class org.apache.hadoop.io.compress.bzip2.CBZip2InputStream -->
+ <class name="CBZip2InputStream" extends="java.io.InputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.bzip2.BZip2Constants"/>
+ <constructor name="CBZip2InputStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a new CBZip2InputStream which decompresses bytes read from the
+ specified stream.
+
+ <p>
+ Although BZip2 headers are marked with the magic <tt>"BZ"</tt>, this
+ constructor expects the next byte in the stream to be the first one after
+ the magic. Callers therefore have to skip the first two bytes; otherwise
+ this constructor will throw an exception.
+ </p>
+
+ @throws IOException
+ if the stream content is malformed or an I/O error occurs.
+ @throws NullPointerException
+ if <tt>in == null</tt>]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dest" type="byte[]"/>
+ <param name="offs" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An input stream that decompresses from the BZip2 format (without the file
+ header chars) to be read as any other stream.
+
+ <p>
+ The decompression requires large amounts of memory. Thus you should call the
+ {@link #close() close()} method as soon as possible, to force
+ <tt>CBZip2InputStream</tt> to release the allocated memory. See
+ {@link CBZip2OutputStream CBZip2OutputStream} for information about memory
+ usage.
+ </p>
+
+ <p>
+ <tt>CBZip2InputStream</tt> reads bytes from the compressed source stream via
+ the single byte {@link java.io.InputStream#read() read()} method exclusively.
+ Thus you should consider using a buffered source stream.
+ </p>
+
+ <p>
+ Instances of this class are not threadsafe.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.bzip2.CBZip2InputStream -->
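+ <!-- A sketch of the constructor contract described above: the caller consumes
+ the two magic bytes "BZ" before handing the stream to CBZip2InputStream, and
+ buffers the source because the stream reads it one byte at a time. The file
+ name is illustrative.
+
+ import java.io.*;
+ import org.apache.hadoop.io.compress.bzip2.CBZip2InputStream;
+
+ public class BZip2Read {
+   public static void main(String[] args) throws IOException {
+     InputStream src = new BufferedInputStream(new FileInputStream("data.bz2"));
+     if (src.read() != 'B' || src.read() != 'Z') { // skip the magic bytes
+       throw new IOException("not a BZip2 stream");
+     }
+     CBZip2InputStream in = new CBZip2InputStream(src);
+     byte[] buf = new byte[4096];
+     for (int n = in.read(buf); n > 0; n = in.read(buf)) {
+       System.out.write(buf, 0, n);
+     }
+     in.close(); // release the large decompression buffers promptly
+   }
+ }
+ -->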
+ <!-- start class org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream -->
+ <class name="CBZip2OutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.bzip2.BZip2Constants"/>
+ <constructor name="CBZip2OutputStream" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a new <tt>CBZip2OutputStream</tt> with a blocksize of 900k.
+
+ <p>
+ <b>Attention: </b>The caller is responsible for writing the two BZip2 magic
+ bytes <tt>"BZ"</tt> to the specified stream prior to calling this
+ constructor.
+ </p>
+
+ @param out
+ the destination stream.
+
+ @throws IOException
+ if an I/O error occurs in the specified stream.
+ @throws NullPointerException
+ if <code>out == null</code>.]]>
+ </doc>
+ </constructor>
+ <constructor name="CBZip2OutputStream" type="java.io.OutputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a new <tt>CBZip2OutputStream</tt> with specified blocksize.
+
+ <p>
+ <b>Attention: </b>The caller is responsible for writing the two BZip2 magic
+ bytes <tt>"BZ"</tt> to the specified stream prior to calling this
+ constructor.
+ </p>
+
+
+ @param out
+ the destination stream.
+ @param blockSize
+ the blocksize in units of 100k.
+
+ @throws IOException
+ if an I/O error occurs in the specified stream.
+ @throws IllegalArgumentException
+ if <code>(blockSize < 1) || (blockSize > 9)</code>.
+ @throws NullPointerException
+ if <code>out == null</code>.
+
+ @see #MIN_BLOCKSIZE
+ @see #MAX_BLOCKSIZE]]>
+ </doc>
+ </constructor>
+ <method name="hbMakeCodeLengths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="len" type="char[]"/>
+ <param name="freq" type="int[]"/>
+ <param name="alphaSize" type="int"/>
+ <param name="maxLen" type="int"/>
+ <doc>
+ <![CDATA[This method is accessible by subclasses for historical purposes. If you
+ don't know what it does then you don't need it.]]>
+ </doc>
+ </method>
+ <method name="chooseBlockSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputLength" type="long"/>
+ <doc>
+ <![CDATA[Chooses a blocksize based on the given length of the data to compress.
+
+ @param inputLength
+ the length of the data which will be compressed by
+ <tt>CBZip2OutputStream</tt>.
+
+ @return the blocksize, between {@link #MIN_BLOCKSIZE} and
+ {@link #MAX_BLOCKSIZE}, both inclusive. For a negative
+ <tt>inputLength</tt> this method always returns
+ <tt>MAX_BLOCKSIZE</tt>.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Overridden to close the stream.]]>
+ </doc>
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBlockSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the blocksize parameter specified at construction time.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offs" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="MIN_BLOCKSIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The minimum supported blocksize <tt> == 1</tt>.]]>
+ </doc>
+ </field>
+ <field name="MAX_BLOCKSIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The maximum supported blocksize <tt> == 9</tt>.]]>
+ </doc>
+ </field>
+ <field name="SETMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="CLEARMASK" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="GREATER_ICOST" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="LESSER_ICOST" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="SMALL_THRESH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="DEPTH_THRESH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="WORK_FACTOR" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.]]>
+ </doc>
+ </field>
+ <field name="QSORT_STACK_SIZE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constant is accessible by subclasses for historical purposes. If you
+ don't know what it means then you don't need it.
+ <p>
+ If you are ever unlucky/improbable enough to get a stack overflow whilst
+ sorting, increase the following constant and try again. In practice I
+ have never seen the stack go above 27 elems, so the following limit seems
+ very generous.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An output stream that compresses into the BZip2 format (without the file
+ header chars) into another stream.
+
+ <p>
+ The compression requires large amounts of memory. Thus you should call the
+ {@link #close() close()} method as soon as possible, to force
+ <tt>CBZip2OutputStream</tt> to release the allocated memory.
+ </p>
+
+ <p>
+ You can shrink the amount of allocated memory, and possibly raise the
+ compression speed, by choosing a lower blocksize, which in turn may lower
+ the compression ratio. You can avoid unnecessary memory allocation by not
+ using a blocksize which is bigger than the size of the input.
+ </p>
+
+ <p>
+ You can compute the memory usage for compressing by the following formula:
+ </p>
+
+ <pre>400k + (9 * blocksize)</pre>
+
+ <p>
+ To get the memory required for decompression by {@link CBZip2InputStream
+ CBZip2InputStream} use
+ </p>
+
+ <pre>65k + (5 * blocksize)</pre>
+
+ <table width="100%" border="1">
+ <colgroup> <col width="33%" /> <col width="33%" /> <col width="33%" />
+ </colgroup>
+ <tr>
+ <th colspan="3">Memory usage by blocksize</th>
+ </tr>
+ <tr>
+ <th align="right">Blocksize</th> <th align="right">Compression<br>
+ memory usage</th> <th align="right">Decompression<br>
+ memory usage</th>
+ </tr>
+ <tr>
+ <td align="right">100k</td>
+ <td align="right">1300k</td>
+ <td align="right">565k</td>
+ </tr>
+ <tr>
+ <td align="right">200k</td>
+ <td align="right">2200k</td>
+ <td align="right">1065k</td>
+ </tr>
+ <tr>
+ <td align="right">300k</td>
+ <td align="right">3100k</td>
+ <td align="right">1565k</td>
+ </tr>
+ <tr>
+ <td align="right">400k</td>
+ <td align="right">4000k</td>
+ <td align="right">2065k</td>
+ </tr>
+ <tr>
+ <td align="right">500k</td>
+ <td align="right">4900k</td>
+ <td align="right">2565k</td>
+ </tr>
+ <tr>
+ <td align="right">600k</td>
+ <td align="right">5800k</td>
+ <td align="right">3065k</td>
+ </tr>
+ <tr>
+ <td align="right">700k</td>
+ <td align="right">6700k</td>
+ <td align="right">3565k</td>
+ </tr>
+ <tr>
+ <td align="right">800k</td>
+ <td align="right">7600k</td>
+ <td align="right">4065k</td>
+ </tr>
+ <tr>
+ <td align="right">900k</td>
+ <td align="right">8500k</td>
+ <td align="right">4565k</td>
+ </tr>
+ </table>
+
+ <p>
+ For decompression <tt>CBZip2InputStream</tt> allocates less memory if the
+ bzipped input is smaller than one block.
+ </p>
+
+ <p>
+ Instances of this class are not threadsafe.
+ </p>
+
+ <p>
+ TODO: Update to BZip2 1.0.1
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream -->
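+ <!-- The matching output-side sketch: per the constructor contract, the caller
+ writes the "BZ" magic bytes first, and chooseBlockSize() picks a blocksize
+ from the input length. Illustrative only; the file name is made up.
+
+ import java.io.*;
+ import org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream;
+
+ public class BZip2Write {
+   public static void main(String[] args) throws IOException {
+     byte[] data = "compress me".getBytes("UTF-8");
+     OutputStream file = new FileOutputStream("data.bz2");
+     file.write('B'); // the caller, not the stream, writes the magic
+     file.write('Z');
+     int blockSize = CBZip2OutputStream.chooseBlockSize(data.length); // 1..9
+     CBZip2OutputStream out = new CBZip2OutputStream(file, blockSize);
+     out.write(data);
+     out.close(); // finishes the final block and frees ~400k + 9*blocksize
+   }
+ }
+ -->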
+</package>
+<package name="org.apache.hadoop.io.compress.zlib">
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <class name="BuiltInZlibDeflater" extends="java.util.zip.Deflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="BuiltInZlibDeflater" type="int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibDeflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Deflater to make it conform
+ to the org.apache.hadoop.io.compress.Compressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <class name="BuiltInZlibInflater" extends="java.util.zip.Inflater"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="BuiltInZlibInflater" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="BuiltInZlibInflater"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A wrapper around java.util.zip.Inflater to make it conform
+ to the org.apache.hadoop.io.compress.Decompressor interface.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
+ <class name="ZlibCompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Compressor"/>
+ <constructor name="ZlibCompressor" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor using the specified compression level,
+ strategy, and header format.
+
+ @param level compression level, see {@link CompressionLevel}
+ @param strategy compression strategy, see {@link CompressionStrategy}
+ @param header compression header format, see {@link CompressionHeader}
+ @param directBufferSize size of the direct buffer to be used]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibCompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new compressor with the default compression level.
+ Compressed data will be generated in ZLIB format.]]>
+ </doc>
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finish"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Compressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor -->
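+ <!-- A sketch of driving a Compressor directly: feed input with setInput(),
+ signal end of input with finish(), then drain with compress() until
+ finished(). Assumes the native zlib library is loadable; ZlibFactory (later
+ in this listing) is the safer entry point when it may not be.
+
+ import java.io.ByteArrayOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.io.compress.Compressor;
+ import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
+
+ public class ZlibCompressLoop {
+   public static void main(String[] args) throws IOException {
+     byte[] input = "some bytes to deflate".getBytes("UTF-8");
+     Compressor compressor = new ZlibCompressor(); // default level, ZLIB format
+
+     compressor.setInput(input, 0, input.length);
+     compressor.finish(); // no more input will follow
+
+     ByteArrayOutputStream compressed = new ByteArrayOutputStream();
+     byte[] buf = new byte[4096];
+     while (!compressor.finished()) {
+       int n = compressor.compress(buf, 0, buf.length);
+       compressed.write(buf, 0, n);
+     }
+     compressor.end(); // free native resources
+     System.out.println(compressed.size() + " compressed bytes");
+   }
+ }
+ -->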
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <class name="ZlibCompressor.CompressionHeader" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="NO_HEADER" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[No headers/trailers/checksums.]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_HEADER" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default headers/trailers/checksums.]]>
+ </doc>
+ </field>
+ <field name="GZIP_FORMAT" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Simple gzip headers/trailers.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The type of header for compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <class name="ZlibCompressor.CompressionLevel" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NO_COMPRESSION" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compression level for no compression.]]>
+ </doc>
+ </field>
+ <field name="BEST_SPEED" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compression level for fastest compression.]]>
+ </doc>
+ </field>
+ <field name="BEST_COMPRESSION" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compression level for best compression.]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_COMPRESSION" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default compression level.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The compression level for the zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
+ <class name="ZlibCompressor.CompressionStrategy" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="FILTERED" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compression strategy best used for data consisting mostly of small
+ values with a somewhat random distribution. Forces more Huffman coding
+ and less string matching.]]>
+ </doc>
+ </field>
+ <field name="HUFFMAN_ONLY" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compression strategy for Huffman coding only.]]>
+ </doc>
+ </field>
+ <field name="RLE" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compression strategy to limit match distances to one
+ (run-length encoding).]]>
+ </doc>
+ </field>
+ <field name="FIXED" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Compression strategy to prevent the use of dynamic Huffman codes,
+ allowing for a simpler decoder for special applications.]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_STRATEGY" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default compression strategy.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The compression strategy for the zlib library.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy -->
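+ <!-- A sketch combining the three enums above with the four-argument
+ ZlibCompressor constructor. The buffer size is illustrative, and native zlib
+ is assumed to be available.
+
+ import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
+ import org.apache.hadoop.io.compress.zlib.ZlibCompressor.*; // nested enums
+
+ public class TunedZlib {
+   public static void main(String[] args) {
+     ZlibCompressor gzipStyle = new ZlibCompressor(
+         CompressionLevel.BEST_SPEED,
+         CompressionStrategy.FILTERED,  // more Huffman coding, less matching
+         CompressionHeader.GZIP_FORMAT, // emit gzip headers/trailers
+         64 * 1024);                    // direct buffer size in bytes
+     gzipStyle.end();
+   }
+ }
+ -->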
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
+ <class name="ZlibDecompressor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.compress.Decompressor"/>
+ <constructor name="ZlibDecompressor" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new decompressor.]]>
+ </doc>
+ </constructor>
+ <constructor name="ZlibDecompressor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="setDictionary"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="needsInput" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="needsDictionary" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="decompress" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getBytesWritten" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of compressed bytes output so far.
+
+ @return the total (non-negative) number of compressed bytes output so far]]>
+ </doc>
+ </method>
+ <method name="getBytesRead" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of uncompressed bytes input so far.
+
+ @return the total (non-negative) number of uncompressed bytes input so far]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="end"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="finalize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A {@link Decompressor} based on the popular
+ zlib compression algorithm.
+ http://www.zlib.net/]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor -->
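+ <!-- The mirror image of the compress loop: feed compressed bytes with
+ setInput() and drain with decompress() until finished(). This sketch assumes
+ the whole compressed stream fits in one buffer and that native zlib is
+ available.
+
+ import java.io.ByteArrayOutputStream;
+ import java.io.IOException;
+ import org.apache.hadoop.io.compress.Decompressor;
+ import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
+
+ public class ZlibDecompressLoop {
+   static byte[] inflate(byte[] compressed) throws IOException {
+     Decompressor decompressor = new ZlibDecompressor();
+     decompressor.setInput(compressed, 0, compressed.length);
+     ByteArrayOutputStream out = new ByteArrayOutputStream();
+     byte[] buf = new byte[4096];
+     while (!decompressor.finished()) {
+       int n = decompressor.decompress(buf, 0, buf.length);
+       out.write(buf, 0, n);
+     }
+     decompressor.end(); // free native resources
+     return out.toByteArray();
+   }
+ }
+ -->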
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <class name="ZlibDecompressor.CompressionHeader" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="windowBits" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="NO_HEADER" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[No headers/trailers/checksums.]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_HEADER" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default headers/trailers/checksums.]]>
+ </doc>
+ </field>
+ <field name="GZIP_FORMAT" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Simple gzip headers/trailers.]]>
+ </doc>
+ </field>
+ <field name="AUTODETECT_GZIP_ZLIB" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Autodetect gzip/zlib headers/trailers.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The headers to detect from compressed data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader -->
+ <!-- start class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
+ <class name="ZlibFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ZlibFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeZlibLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check if the native-zlib code is loaded and initialized correctly
+ and can be used for this job.
+
+ @param conf configuration
+ @return <code>true</code> if native-zlib is loaded and initialized
+ and can be used for this job, else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibCompressor" return="org.apache.hadoop.io.compress.Compressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib compressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib compressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressorType" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate type of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate type of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <method name="getZlibDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return the appropriate implementation of the zlib decompressor.
+
+ @param conf configuration
+ @return the appropriate implementation of the zlib decompressor.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A collection of factories to create the right
+ zlib/gzip compressor/decompressor instances.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.compress.zlib.ZlibFactory -->
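+ <!-- A sketch of letting the factory choose between the native zlib codecs and
+ the pure-Java Deflater/Inflater wrappers documented earlier in this package.
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.io.compress.Compressor;
+ import org.apache.hadoop.io.compress.zlib.ZlibFactory;
+
+ public class ZlibFactoryCheck {
+   public static void main(String[] args) {
+     Configuration conf = new Configuration();
+     System.out.println("native zlib: " + ZlibFactory.isNativeZlibLoaded(conf));
+     Compressor compressor = ZlibFactory.getZlibCompressor(conf);
+     System.out.println("using " + compressor.getClass().getName());
+     compressor.end();
+   }
+ }
+ -->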
+</package>
+<package name="org.apache.hadoop.io.retry">
+ <!-- start class org.apache.hadoop.io.retry.RetryPolicies -->
+ <class name="RetryPolicies" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryPolicies"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="retryUpToMaximumCountWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumTimeWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxTime" type="long"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying for a maximum time, waiting a fixed time between attempts,
+ and then fail by re-throwing the exception.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryUpToMaximumCountWithProportionalSleep" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by the number of tries so far.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="exponentialBackoffRetry" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="maxRetries" type="int"/>
+ <param name="sleepTime" type="long"/>
+ <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+ <doc>
+ <![CDATA[<p>
+ Keep trying a limited number of times, waiting a growing amount of time between attempts,
+ and then fail by re-throwing the exception.
+ The time between attempts is <code>sleepTime</code> multiplied by a random
+ number in the range [0, 2^retries).
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map"/>
+ <doc>
+ <![CDATA[<p>
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <method name="retryByRemoteException" return="org.apache.hadoop.io.retry.RetryPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <param name="exceptionToPolicyMap" type="java.util.Map"/>
+ <doc>
+ <![CDATA[<p>
+ A retry policy for RemoteException.
+ Set a default policy with some explicit handlers for specific exceptions.
+ </p>]]>
+ </doc>
+ </method>
+ <field name="TRY_ONCE_THEN_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail by re-throwing the exception.
+ This corresponds to having no retry mechanism in place.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="TRY_ONCE_DONT_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Try once, and fail silently for <code>void</code> methods, or by
+ re-throwing the exception for non-<code>void</code> methods.
+ </p>]]>
+ </doc>
+ </field>
+ <field name="RETRY_FOREVER" type="org.apache.hadoop.io.retry.RetryPolicy"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Keep trying forever.
+ </p>]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[<p>
+ A collection of useful implementations of {@link RetryPolicy}.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryPolicies -->
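+ <!-- A sketch of composing the factory methods above; the choice of
+      ConnectException and the timing values are hypothetical:
+ 
+      RetryPolicy backoff =
+          RetryPolicies.exponentialBackoffRetry(4, 100, TimeUnit.MILLISECONDS);
+      Map<Class<? extends Exception>, RetryPolicy> handlers =
+          new HashMap<Class<? extends Exception>, RetryPolicy>();
+      handlers.put(ConnectException.class, backoff);
+      RetryPolicy policy =
+          RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL, handlers);
+ -->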
+ <!-- start interface org.apache.hadoop.io.retry.RetryPolicy -->
+ <interface name="RetryPolicy" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="shouldRetry" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Exception"/>
+ <param name="retries" type="int"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[<p>
+ Determines whether the framework should retry a
+ method for the given exception, and the number
+ of retries that have been made for that operation
+ so far.
+ </p>
+ @param e The exception that caused the method to fail.
+ @param retries The number of times the method has been retried.
+ @return <code>true</code> if the method should be retried,
+ <code>false</code> if the method should not be retried
+ but shouldn't fail with an exception (only for void methods).
+ @throws Exception The re-thrown exception <code>e</code> indicating
+ that the method failed and should not be retried further.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Specifies a policy for retrying method failures.
+ Implementations of this interface should be immutable.
+ </p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.retry.RetryPolicy -->
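+ <!-- A minimal sketch of a custom immutable implementation of the
+      interface above; the class name and retry limit are hypothetical:
+ 
+      public class RetryTwiceOnIOException implements RetryPolicy {
+        public boolean shouldRetry(Exception e, int retries) throws Exception {
+          if (e instanceof IOException && retries < 2) {
+            return true; // the framework should retry the method
+          }
+          throw e; // fail: rethrow the original exception
+        }
+      }
+ -->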
+ <!-- start class org.apache.hadoop.io.retry.RetryProxy -->
+ <class name="RetryProxy" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RetryProxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using the same retry policy for each method in the interface.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param retryPolicy the policy for retrying method call failures
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <method name="create" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="iface" type="java.lang.Class"/>
+ <param name="implementation" type="java.lang.Object"/>
+ <param name="methodNameToPolicyMap" type="java.util.Map"/>
+ <doc>
+ <![CDATA[<p>
+ Create a proxy for an interface of an implementation class
+ using a set of retry policies specified by method name.
+ If no retry policy is defined for a method then a default of
+ {@link RetryPolicies#TRY_ONCE_THEN_FAIL} is used.
+ </p>
+ @param iface the interface that the retry will implement
+ @param implementation the instance whose methods should be retried
+ @param methodNameToPolicyMap a map of method names to retry policies
+ @return the retry proxy]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for creating retry proxies.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.retry.RetryProxy -->
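+ <!-- A sketch of wrapping an implementation with the first create()
+      overload above; FileOperations and fileOpsImpl are hypothetical:
+ 
+      RetryPolicy policy =
+          RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10, TimeUnit.SECONDS);
+      FileOperations retrying =
+          (FileOperations) RetryProxy.create(FileOperations.class, fileOpsImpl, policy);
+ -->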
+</package>
+<package name="org.apache.hadoop.io.serializer">
+ <!-- start interface org.apache.hadoop.io.serializer.Deserializer -->
+ <interface name="Deserializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.InputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the deserializer for reading.</p>]]>
+ </doc>
+ </method>
+ <method name="deserialize" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>
+ Deserialize the next object from the underlying input stream.
+ If the object <code>t</code> is non-null then this deserializer
+ <i>may</i> set its internal state to the next object read from the input
+ stream. Otherwise, if the object <code>t</code> is null a new
+ deserialized object will be created.
+ </p>
+ @return the deserialized object]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying input stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for deserializing objects of type <T> from an
+ {@link InputStream}.
+ </p>
+
+ <p>
+ Deserializers are stateful, but must not buffer the input since
+ other consumers may read from the input between calls to
+ {@link #deserialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Deserializer -->
+ <!-- start class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <class name="DeserializerComparator" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.RawComparator"/>
+ <constructor name="DeserializerComparator" type="org.apache.hadoop.io.serializer.Deserializer"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link Deserializer} to deserialize
+ the objects to be compared so that the standard {@link Comparator} can
+ be used to compare them.
+ </p>
+ <p>
+ One may optimize compare-intensive operations by using a custom
+ implementation of {@link RawComparator} that operates directly
+ on byte representations.
+ </p>
+ @param <T>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.DeserializerComparator -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <class name="JavaSerialization" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization"/>
+ <constructor name="JavaSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ An experimental {@link Serialization} for Java {@link Serializable} classes.
+ </p>
+ @see JavaSerializationComparator]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerialization -->
+ <!-- start class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <class name="JavaSerializationComparator" extends="org.apache.hadoop.io.serializer.DeserializerComparator"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JavaSerializationComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o1" type="java.io.Serializable"/>
+ <param name="o2" type="java.io.Serializable"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link JavaSerialization}
+ {@link Deserializer} to deserialize objects that are then compared via
+ their {@link Comparable} interfaces.
+ </p>
+ @param <T>
+ @see JavaSerialization]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+ <!-- start interface org.apache.hadoop.io.serializer.Serialization -->
+ <interface name="Serialization" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Allows clients to test whether this {@link Serialization}
+ supports the given class.]]>
+ </doc>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[@return a {@link Serializer} for the given class.]]>
+ </doc>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[@return a {@link Deserializer} for the given class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Encapsulates a {@link Serializer}/{@link Deserializer} pair.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serialization -->
+ <!-- start class org.apache.hadoop.io.serializer.SerializationFactory -->
+ <class name="SerializationFactory" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SerializationFactory" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[<p>
+ Serializations are found by reading the <code>io.serializations</code>
+ property from <code>conf</code>, which is a comma-delimited list of
+ classnames.
+ </p>]]>
+ </doc>
+ </constructor>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ </method>
+ <method name="getSerialization" return="org.apache.hadoop.io.serializer.Serialization"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ A factory for {@link Serialization}s.
+ </p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.SerializationFactory -->
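+ <!-- A sketch of obtaining a serializer pair from the factory above;
+      the io.serializations value names the stock WritableSerialization
+      documented below, and IntWritable stands in for any accepted class:
+ 
+      Configuration conf = new Configuration();
+      conf.set("io.serializations",
+          "org.apache.hadoop.io.serializer.WritableSerialization");
+      SerializationFactory factory = new SerializationFactory(conf);
+      Serializer serializer = factory.getSerializer(IntWritable.class);
+      Deserializer deserializer = factory.getDeserializer(IntWritable.class);
+ -->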
+ <!-- start interface org.apache.hadoop.io.serializer.Serializer -->
+ <interface name="Serializer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="open"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.OutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Prepare the serializer for writing.</p>]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Serialize <code>t</code> to the underlying output stream.</p>]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<p>Close the underlying output stream and clear up any resources.</p>]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<p>
+ Provides a facility for serializing objects of type <T> to an
+ {@link OutputStream}.
+ </p>
+
+ <p>
+ Serializers are stateful, but must not buffer the output since
+ other producers may write to the output between calls to
+ {@link #serialize(Object)}.
+ </p>
+ @param <T>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.io.serializer.Serializer -->
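+ <!-- A sketch of the open/serialize/close and open/deserialize/close
+      lifecycles defined by Serializer and Deserializer, round-tripping a
+      value through an in-memory stream (serializer and deserializer as
+      obtained from a SerializationFactory above):
+ 
+      ByteArrayOutputStream out = new ByteArrayOutputStream();
+      serializer.open(out);
+      serializer.serialize(new IntWritable(42));
+      serializer.close();
+ 
+      deserializer.open(new ByteArrayInputStream(out.toByteArray()));
+      IntWritable copy = (IntWritable) deserializer.deserialize(null); // null: allocate a fresh object
+      deserializer.close();
+ -->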
+ <!-- start class org.apache.hadoop.io.serializer.WritableSerialization -->
+ <class name="WritableSerialization" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.serializer.Serialization"/>
+ <constructor name="WritableSerialization"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ </method>
+ <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ </method>
+ <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Serialization} for {@link Writable}s that delegates to
+ {@link Writable#write(java.io.DataOutput)} and
+ {@link Writable#readFields(java.io.DataInput)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.io.serializer.WritableSerialization -->
+</package>
+<package name="org.apache.hadoop.ipc">
+ <!-- start class org.apache.hadoop.ipc.Client -->
+ <class name="Client" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Client" type="java.lang.Class, org.apache.hadoop.conf.Configuration, javax.net.SocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client whose values are of the given {@link Writable}
+ class.]]>
+ </doc>
+ </constructor>
+ <constructor name="Client" type="java.lang.Class, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an IPC client with the default SocketFactory
+ @param valueClass
+ @param conf]]>
+ </doc>
+ </constructor>
+ <method name="setPingInterval"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="pingInterval" type="int"/>
+ <doc>
+ <![CDATA[Set the ping interval value in the configuration.
+
+ @param conf Configuration
+ @param pingInterval the ping interval]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stop all threads related to this client. No further calls may be made
+ using this client.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #call(Writable, InetSocketAddress, Class, UserGroupInformation)} instead">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a call, passing <code>param</code>, to the IPC server running at
+ <code>address</code>, returning the value. Throws exceptions if there are
+ network problems or if the remote code threw an exception.
+ @deprecated Use {@link #call(Writable, InetSocketAddress, Class, UserGroupInformation)} instead]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #call(Writable, InetSocketAddress, Class, UserGroupInformation)} instead">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a call, passing <code>param</code>, to the IPC server running at
+ <code>address</code> with the <code>ticket</code> credentials, returning
+ the value.
+ Throws exceptions if there are network problems or if the remote code
+ threw an exception.
+ @deprecated Use {@link #call(Writable, InetSocketAddress, Class, UserGroupInformation)} instead]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a call, passing <code>param</code>, to the IPC server running at
+ <code>address</code> which is servicing the <code>protocol</code> protocol,
+ with the <code>ticket</code> credentials, returning the value.
+ Throws exceptions if there are network problems or if the remote code
+ threw an exception.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #call(Writable[], InetSocketAddress[], Class, UserGroupInformation)} instead">
+ <param name="params" type="org.apache.hadoop.io.Writable[]"/>
+ <param name="addresses" type="java.net.InetSocketAddress[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Makes a set of calls in parallel. Each parameter is sent to the
+ corresponding address. When all values are available, or have timed out
+ or errored, the collected results are returned in an array. The array
+ contains nulls for calls that timed out or errored.
+ @deprecated Use {@link #call(Writable[], InetSocketAddress[], Class, UserGroupInformation)} instead]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="params" type="org.apache.hadoop.io.Writable[]"/>
+ <param name="addresses" type="java.net.InetSocketAddress[]"/>
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Makes a set of calls in parallel. Each parameter is sent to the
+ corresponding address. When all values are available, or have timed out
+ or errored, the collected results are returned in an array. The array
+ contains nulls for calls that timed out or errored.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A client for an IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Server]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Client -->
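+ <!-- A sketch of a single call through the non-deprecated overload
+      above; param, addr, ticket and MyProtocol are assumed to be
+      prepared by the caller, and ObjectWritable stands in for whichever
+      Writable value class the server returns:
+ 
+      Client client = new Client(ObjectWritable.class, conf);
+      try {
+        Writable result = client.call(param, addr, MyProtocol.class, ticket);
+      } finally {
+        client.stop(); // no further calls may be made afterwards
+      }
+ -->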
+ <!-- start class org.apache.hadoop.ipc.RemoteException -->
+ <class name="RemoteException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RemoteException" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lookupTypes" type="java.lang.Class[]"/>
+ <doc>
+ <![CDATA[If this remote exception wraps one of the lookupTypes,
+ instantiate and return the wrapped exception.
+ <p>
+ Unwraps any IOException.
+ 
+ @param lookupTypes the desired exception classes.
+ @return IOException, which is either an instance of one of the lookupTypes or this.]]>
+ </doc>
+ </method>
+ <method name="unwrapRemoteException" return="java.io.IOException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Instantiate and return the exception wrapped up by this remote exception.
+
+ <p> This unwraps any <code>Throwable</code> that has a constructor taking
+ a <code>String</code> as a parameter.
+ Otherwise it returns this.
+
+ @return <code>Throwable</code>]]>
+ </doc>
+ </method>
+ <method name="writeXml"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the object out in XML format.]]>
+ </doc>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.ipc.RemoteException"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attrs" type="org.xml.sax.Attributes"/>
+ <doc>
+ <![CDATA[Create a RemoteException from attributes.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RemoteException -->
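+ <!-- A sketch of unwrapping with the lookup overload above; the remote
+      call, the proxy name and FileNotFoundException are hypothetical:
+ 
+      try {
+        namenode.rename(src, dst); // some remote invocation
+      } catch (RemoteException re) {
+        throw re.unwrapRemoteException(FileNotFoundException.class);
+      }
+ -->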
+ <!-- start class org.apache.hadoop.ipc.RPC -->
+ <class name="RPC" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="waitForProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="factory" type="javax.net.SocketFactory"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object that implements the named protocol,
+ talking to a server at the named address.]]>
+ </doc>
+ </method>
+ <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="clientVersion" type="long"/>
+ <param name="addr" type="java.net.InetSocketAddress"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a client-side proxy object with the default SocketFactory
+
+ @param protocol
+ @param clientVersion
+ @param addr
+ @param conf
+ @return a proxy instance
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="stopProxy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="proxy" type="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <doc>
+ <![CDATA[Stop this proxy and release its invoker's resource
+ @param proxy the proxy to be stopped]]>
+ </doc>
+ </method>
+ <method name="call" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link #call(Method, Object[][], InetSocketAddress[], UserGroupInformation, Configuration)} instead">
+ <param name="method" type="java.lang.reflect.Method"/>
+ <param name="params" type="java.lang.Object[][]"/>
+ <param name="addrs" type="java.net.InetSocketAddress[]"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Expert: Make multiple, parallel calls to a set of servers.
+ @deprecated Use {@link #call(Method, Object[][], InetSocketAddress[], UserGroupInformation, Configuration)} instead]]>
+ </doc>
+ </method>
+ <method name="call" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="method" type="java.lang.reflect.Method"/>
+ <param name="params" type="java.lang.Object[][]"/>
+ <param name="addrs" type="java.net.InetSocketAddress[]"/>
+ <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Expert: Make multiple, parallel calls to a set of servers.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="instance" type="java.lang.Object"/>
+ <param name="bindAddress" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="numHandlers" type="int"/>
+ <param name="verbose" type="boolean"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a server for a protocol implementation instance listening on a
+ port and address.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple RPC mechanism.
+
+ A <i>protocol</i> is a Java interface. All parameters and return types must
+ be one of:
+
+ <ul> <li>a primitive type, <code>boolean</code>, <code>byte</code>,
+ <code>char</code>, <code>short</code>, <code>int</code>, <code>long</code>,
+ <code>float</code>, <code>double</code>, or <code>void</code>; or</li>
+
+ <li>a {@link String}; or</li>
+
+ <li>a {@link Writable}; or</li>
+
+ <li>an array of the above types</li> </ul>
+
+ All methods in the protocol should throw only IOException. No field data of
+ the protocol instance is transmitted.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC -->
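+ <!-- A sketch of standing up a server and a client-side proxy with the
+      methods above; MyProtocol, its implementation instance impl, and
+      the host/port values are hypothetical:
+ 
+      RPC.Server server = RPC.getServer(impl, "0.0.0.0", 9000, conf);
+      server.start();
+ 
+      MyProtocol proxy = (MyProtocol) RPC.getProxy(
+          MyProtocol.class, MyProtocol.versionID,
+          new InetSocketAddress("example.host", 9000), conf);
+      // ... invoke proxy methods ...
+      RPC.stopProxy(proxy);
+ -->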
+ <!-- start class org.apache.hadoop.ipc.RPC.Server -->
+ <class name="RPC.Server" extends="org.apache.hadoop.ipc.Server"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind to when listening for connections
+ @param port the port to listen for connections on]]>
+ </doc>
+ </constructor>
+ <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int, int, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct an RPC server.
+ @param instance the instance whose methods will be called
+ @param conf the configuration to use
+ @param bindAddress the address to bind to when listening for connections
+ @param port the port to listen for connections on
+ @param numHandlers the number of method handler threads to run
+ @param verbose whether each call should be logged]]>
+ </doc>
+ </constructor>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receivedTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="authorize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="javax.security.auth.Subject"/>
+ <param name="connection" type="org.apache.hadoop.ipc.ConnectionHeader"/>
+ <exception name="AuthorizationException" type="org.apache.hadoop.security.authorize.AuthorizationException"/>
+ </method>
+ <doc>
+ <![CDATA[An RPC Server.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.Server -->
+ <!-- start class org.apache.hadoop.ipc.RPC.VersionMismatch -->
+ <class name="RPC.VersionMismatch" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RPC.VersionMismatch" type="java.lang.String, long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a version mismatch exception
+ @param interfaceName the name of the protocol mismatch
+ @param clientVersion the client's version of the protocol
+ @param serverVersion the server's version of the protocol]]>
+ </doc>
+ </constructor>
+ <method name="getInterfaceName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the interface name.
+ @return the Java class name
+ (e.g. org.apache.hadoop.mapred.InterTrackerProtocol)]]>
+ </doc>
+ </method>
+ <method name="getClientVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the client's preferred version]]>
+ </doc>
+ </method>
+ <method name="getServerVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the server's agreed-upon version.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A version mismatch for the RPC protocol.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.RPC.VersionMismatch -->
+ <!-- start class org.apache.hadoop.ipc.Server -->
+ <class name="Server" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class, int, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="Server" type="java.lang.String, int, java.lang.Class, int, org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a server listening on the named port and address. Parameters passed must
+ be of the named class. The <code>handlerCount</code> determines
+ the number of handler threads that will be used to process calls.]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.ipc.Server"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the server instance the current call is being made under, or null. May be called under
+ {@link #call(Writable, long)} implementations, and under {@link Writable}
+ methods of parameters and return values. Permits applications to access
+ the server context.]]>
+ </doc>
+ </method>
+ <method name="getRemoteIp" return="java.net.InetAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the remote side's IP address when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="getRemoteAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns remote address as a string when invoked inside an RPC.
+ Returns null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="bind"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.ServerSocket"/>
+ <param name="address" type="java.net.InetSocketAddress"/>
+ <param name="backlog" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A convenience method to bind to a given address and report
+ better exceptions if the address is not a valid host.
+ @param socket the socket to bind
+ @param address the address to bind to
+ @param backlog the number of connections allowed in the queue
+ @throws BindException if the address can't be bound
+ @throws UnknownHostException if the address isn't a valid host name
+ @throws IOException other random errors from bind]]>
+ </doc>
+ </method>
+ <method name="setSocketSendBufSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="size" type="int"/>
+ <doc>
+ <![CDATA[Sets the socket buffer size used for responding to RPCs]]>
+ </doc>
+ </method>
+ <method name="start"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts the service. Must be called before any calls will be handled.]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops the service. No new calls will be handled after this is called.]]>
+ </doc>
+ </method>
+ <method name="join"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Wait for the server to be stopped.
+ Does not wait for all subthreads to finish.
+ See {@link #stop()}.]]>
+ </doc>
+ </method>
+ <method name="getListenerAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the socket (ip+port) on which the RPC server is listening.
+ @return the socket (ip+port) on which the RPC server is listening.]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link #call(Class, Writable, long)} instead">
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receiveTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called for each call.
+ @deprecated Use {@link #call(Class, Writable, long)} instead]]>
+ </doc>
+ </method>
+ <method name="call" return="org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.Class"/>
+ <param name="param" type="org.apache.hadoop.io.Writable"/>
+ <param name="receiveTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called for each call.]]>
+ </doc>
+ </method>
+ <method name="authorize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="javax.security.auth.Subject"/>
+ <param name="connection" type="org.apache.hadoop.ipc.ConnectionHeader"/>
+ <exception name="AuthorizationException" type="org.apache.hadoop.security.authorize.AuthorizationException"/>
+ <doc>
+ <![CDATA[Authorize the incoming client connection.
+
+ @param user client user
+ @param connection incoming connection
+ @throws AuthorizationException when the client isn't authorized to talk the protocol]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of open RPC connections
+ @return the number of open rpc connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <field name="HEADER" type="java.nio.ByteBuffer"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The first four bytes of Hadoop RPC connections]]>
+ </doc>
+ </field>
+ <field name="CURRENT_VERSION" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rpcMetrics" type="org.apache.hadoop.ipc.metrics.RpcMetrics"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An abstract IPC service. IPC calls take a single {@link Writable} as a
+ parameter, and return a {@link Writable} as their value. A service runs on
+ a port and is defined by a parameter class and a value class.
+
+ @see Client]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.Server -->
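+ <!-- A sketch of consulting the ambient server context from within a
+      call() implementation, per the static accessors above:
+ 
+      Server self = Server.get();               // the server this call runs under, or null
+      InetAddress caller = Server.getRemoteIp(); // null outside an RPC or on error
+ -->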
+ <!-- start interface org.apache.hadoop.ipc.VersionedProtocol -->
+ <interface name="VersionedProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return protocol version corresponding to protocol interface.
+ @param protocol The classname of the protocol interface
+ @param clientVersion The version of the protocol that the client speaks
+ @return the version that the server will speak]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Superclass of all protocols that use Hadoop RPC.
+ Subclasses of this interface are also supposed to have
+ a static final long versionID field.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.VersionedProtocol -->
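+ <!-- A minimal sketch of a protocol interface as described above: it
+      extends VersionedProtocol, declares the expected static final
+      versionID, and (per the RPC contract) throws only IOException;
+      MyProtocol and its echo method are hypothetical:
+ 
+      public interface MyProtocol extends VersionedProtocol {
+        long versionID = 1L; // implicitly public static final in an interface
+        Text echo(Text message) throws IOException;
+      }
+ -->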
+</package>
+<package name="org.apache.hadoop.ipc.metrics">
+ <!-- start class org.apache.hadoop.ipc.metrics.RpcActivityMBean -->
+ <class name="RpcActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RpcActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param mr - the metrics registry that has all the metrics
+ @param serviceName - the service name for the rpc service
+ @param port - the rpc port.]]>
+ </doc>
+ </constructor>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is the JMX MBean for reporting the RPC layer activity.
+ The MBean is registered using the name
+ "hadoop:service=<RpcServiceName>,name=RpcActivityForPort<port>"
+
+ Many of the activity metrics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the metrics that are sampled and averaged, one must specify
+ a metrics context that does periodic update calls. Most metrics contexts do.
+ The default Null metrics context, however, does NOT. So if you aren't
+ using any other metrics context then you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ rpc.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically.
+ 
+ Impl details: we use a dynamic MBean that gets the list of the metrics
+ from the metrics registry passed as an argument to the constructor.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.metrics.RpcActivityMBean -->
+ <!-- start class org.apache.hadoop.ipc.metrics.RpcMetrics -->
+ <class name="RpcMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <constructor name="RpcMetrics" type="java.lang.String, java.lang.String, org.apache.hadoop.ipc.Server"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Push the metrics to the monitoring subsystem on each doUpdates() call.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="rpcQueueTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The metrics variables are public:
+ - they can be set directly by calling their set/inc methods
+ - they can also be read directly, e.g. JMX does this.]]>
+ </doc>
+ </field>
+ <field name="rpcProcessingTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="numOpenConnections" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="callQueueLen" type="org.apache.hadoop.metrics.util.MetricsIntValue"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class is for maintaining the various RPC statistics
+ and publishing them through the metrics interfaces.
+ This also registers the JMX MBean for RPC.
+ <p>
+ This class has a number of metrics variables that are publicly accessible;
+ these variables (objects) have methods to update their values;
+ for example:
+ <p> {@link #rpcQueueTime}.inc(time)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.ipc.metrics.RpcMetrics -->
+ <!-- start interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+ <interface name="RpcMgtMBean" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRpcOpsNumber" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of RPC Operations in the last interval
+ @return number of operations]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Average time for RPC Operations in last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgProcessingTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Processing Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Average RPC Operation Queued Time in the last interval
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMin" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Minimum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="getRpcOpsAvgQueueTimeMax" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The Maximum RPC Operation Queued Time since reset was called
+ @return time in msec]]>
+ </doc>
+ </method>
+ <method name="resetAllMinMax"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset all min max times]]>
+ </doc>
+ </method>
+ <method name="getNumOpenConnections" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of open RPC conections
+ @return the number of open rpc connections]]>
+ </doc>
+ </method>
+ <method name="getCallQueueLen" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of rpc calls in the queue.
+ @return The number of rpc calls in the queue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the JMX management interface for the RPC layer.
+ Many of the statistics are sampled and averaged on an interval
+ which can be specified in the metrics config file.
+ <p>
+ For the statistics that are sampled and averaged, one must specify
+ a metrics context that makes periodic update calls; most contexts do.
+ The default Null metrics context, however, does NOT. So if you aren't
+ using any other metrics context, you can turn on the viewing and averaging
+ of sampled metrics by specifying the following two lines
+ in the hadoop-metrics.properties file:
+ <pre>
+ rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
+ rpc.period=10
+ </pre>
+<p>
+ Note that the metrics are collected regardless of the context used.
+ The context with the update thread is used to average the data periodically]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.ipc.metrics.RpcMgtMBean -->
+</package>
+<package name="org.apache.hadoop.log">
+ <!-- start class org.apache.hadoop.log.LogLevel -->
+ <class name="LogLevel" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[A command line implementation]]>
+ </doc>
+ </method>
+ <field name="USAGES" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Change log level in runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel -->
+ <!-- start class org.apache.hadoop.log.LogLevel.Servlet -->
+ <class name="LogLevel.Servlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LogLevel.Servlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A servlet implementation]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.log.LogLevel.Servlet -->
+</package>
+<package name="org.apache.hadoop.metrics">
+ <!-- start class org.apache.hadoop.metrics.ContextFactory -->
+ <class name="ContextFactory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ContextFactory"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of ContextFactory]]>
+ </doc>
+ </constructor>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the named attribute, or null if there is no
+ attribute of that name.
+
+ @param attributeName the attribute name
+ @return the attribute value]]>
+ </doc>
+ </method>
+ <method name="getAttributeNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all the factory's attributes.
+
+ @return the attribute names]]>
+ </doc>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Sets the named factory attribute to the specified value, creating it
+ if it did not already exist. If the value is null, this is the same as
+ calling removeAttribute.
+
+ @param attributeName the attribute name
+ @param value the new attribute value]]>
+ </doc>
+ </method>
+ <method name="removeAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes the named attribute if it exists.
+
+ @param attributeName the attribute name]]>
+ </doc>
+ </method>
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="refName" type="java.lang.String"/>
+ <param name="contextName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <exception name="InstantiationException" type="java.lang.InstantiationException"/>
+ <exception name="IllegalAccessException" type="java.lang.IllegalAccessException"/>
+ <doc>
+ <![CDATA[Returns the named MetricsContext instance, constructing it if necessary
+ using the factory's current configuration attributes. <p/>
+
+ When constructing the instance, if the factory property
+ <i>contextName</i>.class</code> exists,
+ its value is taken to be the name of the class to instantiate. Otherwise,
+ the default is to create an instance of
+ <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a
+ dummy "no-op" context which will cause all metric data to be discarded.
+
+ @param contextName the name of the context
+ @return the named MetricsContext]]>
+ </doc>
+ </method>
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <exception name="InstantiationException" type="java.lang.InstantiationException"/>
+ <exception name="IllegalAccessException" type="java.lang.IllegalAccessException"/>
+ </method>
+ <method name="getNullContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a "null" context - one which does nothing.]]>
+ </doc>
+ </method>
+ <method name="getFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the singleton ContextFactory instance, constructing it if
+ necessary. <p/>
+
+ When the instance is constructed, this method checks if the file
+ <code>hadoop-metrics.properties</code> exists on the class path. If it
+ exists, it must be in the format defined by java.util.Properties, and all
+ the properties in the file are set as attributes on the newly created
+ ContextFactory instance.
+
+ @return the singleton ContextFactory instance]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Factory class for creating MetricsContext objects. To obtain an instance
+ of this class, use the static <code>getFactory()</code> method.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ContextFactory -->
+ <!-- start interface org.apache.hadoop.metrics.MetricsContext -->
+ <interface name="MetricsContext" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ <doc>
+ <![CDATA[Initialize this context.
+ @param contextName The given name for this context
+ @param factory The creator of this context]]>
+ </doc>
+ </method>
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.
+
+ @return the context name]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, the emitting of metrics records as they are
+ updated.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free any data that the implementation
+ may have buffered for sending at the next timer event. It
+ is OK to call <code>startMonitoring()</code> again after calling
+ this.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and also frees any buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new MetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at regular time intervals, as
+ determined by the implementation-class specific configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records and then return]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <method name="getPeriod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the timer period.]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_PERIOD" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default period in seconds at which data is sent to the metrics system.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The main interface to the metrics package.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsContext -->
+ <!-- start class org.apache.hadoop.metrics.MetricsException -->
+ <class name="MetricsException" extends="java.lang.RuntimeException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricsException
+
+ @param message an error message]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[General-purpose, unchecked metrics exception.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsException -->
+ <!-- start interface org.apache.hadoop.metrics.MetricsRecord -->
+ <interface name="MetricsRecord" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value. The tagValue may be null,
+ which is treated the same as an empty String.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.
+
+ @param tagName name of a tag]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes, from the buffered data table, all rows having tags
+ that equal the tags that have been set on this record. For example,
+ if there are no tags on this record, all rows for this record name
+ would be removed. Or, if there is a single tag on this record, then
+ just rows containing a tag with the same name and value would be removed.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A named and optionally tagged set of records to be sent to the metrics
+ system. <p/>
+
+ A record name identifies the kind of data to be reported. For example, a
+ program reporting statistics relating to the disks on a computer might use
+ a record name "diskStats".<p/>
+
+ A record has zero or more <i>tags</i>. A tag has a name and a value. To
+ continue the example, the "diskStats" record might use a tag named
+ "diskName" to identify a particular disk. Sometimes it is useful to have
+ more than one tag, so there might also be a "diskType" with value "ide" or
+ "scsi" or whatever.<p/>
+
+ A record also has zero or more <i>metrics</i>. These are the named
+ values that are to be reported to the metrics system. In the "diskStats"
+ example, possible metric names would be "diskPercentFull", "diskPercentBusy",
+ "kbReadPerSecond", etc.<p/>
+
+ The general procedure for using a MetricsRecord is to fill in its tag and
+ metric values, and then call <code>update()</code> to pass the record to the
+ client library.
+ Metric data is not immediately sent to the metrics system
+ each time that <code>update()</code> is called.
+ An internal table is maintained, identified by the record name. This
+ table has columns
+ corresponding to the tag and the metric names, and rows
+ corresponding to each unique set of tag values. An update
+ either modifies an existing row in the table, or adds a new row with a set of
+ tag values that are different from all the other rows. Note that if there
+ are no tags, then there can be at most one row in the table. <p/>
+
+ Once a row is added to the table, its data will be sent to the metrics system
+ on every timer period, whether or not it has been updated since the previous
+ timer period. If this is inappropriate, for example if metrics were being
+ reported by some transient object in an application, the <code>remove()</code>
+ method can be used to remove the row and thus stop the data from being
+ sent.<p/>
+
+ Note that the <code>update()</code> method is atomic. This means that it is
+ safe for different threads to be updating the same metric. More precisely,
+ it is OK for different threads to call <code>update()</code> on MetricsRecord instances
+ with the same set of tag names and tag values. Different threads should
+ <b>not</b> use the same MetricsRecord instance at the same time.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.MetricsRecord -->
+ <!-- start class org.apache.hadoop.metrics.MetricsUtil -->
+ <class name="MetricsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ </method>
+ <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="refName" type="java.lang.String"/>
+ <param name="contextName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to return the named context.
+ If the desired context cannot be created for any reason, the exception
+ is logged, and a null context is returned.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Utility method to create and return new metrics record instance within the
+ given context. This record is tagged with the host name.
+
+ @param context the context
+ @param recordName name of the record
+ @return newly created metrics record]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Utility class to simplify creation and reporting of hadoop metrics.
+
+ For examples of usage, see NameNodeMetrics.
+ @see org.apache.hadoop.metrics.MetricsRecord
+ @see org.apache.hadoop.metrics.MetricsContext
+ @see org.apache.hadoop.metrics.ContextFactory]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.MetricsUtil -->
+ <!-- start interface org.apache.hadoop.metrics.Updater -->
+ <interface name="Updater" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[Timer-based call-back from the metric library.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Call-back interface. See <code>MetricsContext.registerUpdater()</code>.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.metrics.Updater -->
+</package>
+<package name="org.apache.hadoop.metrics.file">
+ <!-- start class org.apache.hadoop.metrics.file.FileContext -->
+ <class name="FileContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of FileContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="getFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the configured file name, or null.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, by opening in append-mode, the
+ file specified by the <code>fileName</code> attribute,
+ if specified. Otherwise the data will be written to standard
+ output.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring, closing the file.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Emits a metrics record to a file.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Flushes the output writer, forcing updates to disk.]]>
+ </doc>
+ </method>
+ <field name="FILE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="PERIOD_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Metrics context for writing metrics to a file.<p/>
+
+ This class is configured by setting ContextFactory attributes which in turn
+ are usually configured through a properties file. All the attributes are
+ prefixed by the contextName. For example, the properties file might contain:
+ <pre>
+ myContextName.fileName=/tmp/metrics.log
+ myContextName.period=5
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.file.FileContext -->
+</package>
+<package name="org.apache.hadoop.metrics.ganglia">
+ <!-- start class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+ <class name="GangliaContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GangliaContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of GangliaContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Context for sending metrics to Ganglia.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.ganglia.GangliaContext -->
+</package>
+<package name="org.apache.hadoop.metrics.jvm">
+ <!-- start class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <class name="EventCounter" extends="org.apache.log4j.AppenderSkeleton"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="EventCounter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getFatal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getError" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getWarn" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfo" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="requiresLayout" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A log4J Appender that simply counts logging events in three levels:
+ fatal, error and warn.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.EventCounter -->
+ <!-- start class org.apache.hadoop.metrics.jvm.JvmMetrics -->
+ <class name="JvmMetrics" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.Updater"/>
+ <method name="init" return="org.apache.hadoop.metrics.jvm.JvmMetrics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="processName" type="java.lang.String"/>
+ <param name="sessionId" type="java.lang.String"/>
+ </method>
+ <method name="init" return="org.apache.hadoop.metrics.jvm.JvmMetrics"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="processName" type="java.lang.String"/>
+ <param name="sessionId" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ </method>
+ <method name="doUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/>
+ <doc>
+ <![CDATA[This will be called periodically (with the period being configuration
+ dependent).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Singleton class which reports Java Virtual Machine metrics to the metrics API.
+ Any application can create an instance of this class in order to emit
+ Java VM metrics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.jvm.JvmMetrics -->
+</package>
+<package name="org.apache.hadoop.metrics.spi">
+ <!-- start class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
+ <class name="AbstractMetricsContext" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsContext"/>
+ <constructor name="AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of AbstractMetricsContext]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ <doc>
+ <![CDATA[Initializes the context.]]>
+ </doc>
+ </method>
+ <method name="getAttribute" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for subclasses to access factory attributes.]]>
+ </doc>
+ </method>
+ <method name="getAttributeTable" return="java.util.Map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="tableName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an attribute-value map derived from the factory attributes
+ by finding all factory attributes that begin with
+ <i>contextName</i>.<i>tableName</i>. The returned map consists of
+ those attributes with the contextName and tableName stripped off.]]>
+ </doc>
+ </method>
+ <method name="getContextName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the context name.]]>
+ </doc>
+ </method>
+ <method name="getContextFactory" return="org.apache.hadoop.metrics.ContextFactory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the factory by which this context was created.]]>
+ </doc>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Starts or restarts monitoring, the emitting of metrics records.]]>
+ </doc>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring. This does not free buffered data.
+ @see #close()]]>
+ </doc>
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if monitoring is currently in progress.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Stops monitoring and frees buffered data, returning this
+ object to its initial state.]]>
+ </doc>
+ </method>
+ <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Creates a new AbstractMetricsRecord instance with the given <code>recordName</code>.
+ Throws an exception if the metrics implementation is configured with a fixed
+ set of record names and <code>recordName</code> is not in that set.
+
+ @param recordName the name of the record
+ @throws MetricsException if recordName conflicts with configuration data]]>
+ </doc>
+ </method>
+ <method name="newRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Subclasses should override this if they subclass MetricsRecordImpl.
+ @param recordName the name of the record
+ @return newly created instance of MetricsRecordImpl or subclass]]>
+ </doc>
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Registers a callback to be called at time intervals determined by
+ the configuration.
+
+ @param updater object to be run periodically; it should update
+ some metrics records]]>
+ </doc>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ <doc>
+ <![CDATA[Removes a callback, if it exists.
+
+ @param updater object to be removed from the callback list]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sends a record to the metrics system.]]>
+ </doc>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called each period after all records have been emitted, this method does nothing.
+ Subclasses may override it in order to perform some kind of flush.]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.update(). Creates or updates a row in
+ the internal table of metric data.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Called by MetricsRecordImpl.remove(). Removes all matching rows in
+ the internal table of metric data. A row matches if it has the same
+ tag names and values as record, but it may also have additional
+ tags.]]>
+ </doc>
+ </method>
+ <method name="getPeriod" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the timer period.]]>
+ </doc>
+ </method>
+ <method name="setPeriod"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="period" type="int"/>
+ <doc>
+ <![CDATA[Sets the timer period]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The main class of the Service Provider Interface. This class should be
+ extended in order to integrate the Metrics API with a specific metrics
+ client library. <p/>
+
+ This class implements the internal table of metric data, and the timer
+ on which data is to be sent to the metrics system. Subclasses must
+ override the abstract <code>emitRecord</code> method in order to transmit
+ the data. <p/>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.AbstractMetricsContext -->
+ <!-- start class org.apache.hadoop.metrics.spi.CompositeContext -->
+ <class name="CompositeContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CompositeContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="newRecord" return="org.apache.hadoop.metrics.MetricsRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recordName" type="java.lang.String"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="stopMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isMonitoring" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if all subcontexts are monitoring.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="registerUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ </method>
+ <method name="unregisterUpdater"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="updater" type="org.apache.hadoop.metrics.Updater"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.CompositeContext -->
+ <!-- start class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
+ <class name="MetricsRecordImpl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.metrics.MetricsRecord"/>
+ <constructor name="MetricsRecordImpl" type="java.lang.String, org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of FileRecord]]>
+ </doc>
+ </constructor>
+ <method name="getRecordName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the record name.
+
+ @return the record name]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <param name="tagValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named tag to the specified value.
+
+ @param tagName name of the tag
+ @param tagValue new value of the tag
+ @throws MetricsException if the tagName conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="removeTag"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tagName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Removes any tag of the specified name.]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="setMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Sets the named metric to the specified value.
+
+ @param metricName name of the metric
+ @param metricValue new value of the metric
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="int"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="long"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="short"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="byte"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="incrMetric"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricName" type="java.lang.String"/>
+ <param name="metricValue" type="float"/>
+ <doc>
+ <![CDATA[Increments the named metric by the specified value.
+
+ @param metricName name of the metric
+ @param metricValue incremental value
+ @throws MetricsException if the metricName or the type of the metricValue
+ conflicts with the configuration]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Updates the table of buffered data which is to be sent periodically.
+ If the tag values match an existing row, that row is updated;
+ otherwise, a new row is added.]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes the row, if it exists, in the buffered data table having tags
+ that equal the tags that have been set on this record.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of MetricsRecord. Keeps a back-pointer to the context
+ from which it was created, and delegates back to it on <code>update()</code>
+ and <code>remove()</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricsRecordImpl -->
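+ <!-- Illustrative usage (not part of the generated API description): a minimal sketch of
+      driving a MetricsRecord such as MetricsRecordImpl. MetricsUtil.getContext and
+      MetricsUtil.createRecord come from org.apache.hadoop.metrics; the context, record,
+      tag and metric names below are hypothetical.
+
+        MetricsContext ctx = MetricsUtil.getContext("myContext");
+        MetricsRecord rec = MetricsUtil.createRecord(ctx, "myRecord");
+        rec.setTag("hostName", "node1");      // identifies the buffered row
+        rec.setMetric("bytesWritten", 1024);  // absolute value
+        rec.incrMetric("opsCompleted", 1);    // incremental value
+        rec.update();                         // buffer the row for the next periodic emit
+ -->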
+ <!-- start class org.apache.hadoop.metrics.spi.MetricValue -->
+ <class name="MetricValue" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricValue" type="java.lang.Number, boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of MetricValue]]>
+ </doc>
+ </constructor>
+ <method name="isIncrement" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isAbsolute" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumber" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="ABSOLUTE" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCREMENT" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Number that is either an absolute or an incremental amount.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.MetricValue -->
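+ <!-- Illustrative only: a minimal sketch of how a MetricValue distinguishes absolute
+      updates from increments; the variable names are hypothetical.
+
+        MetricValue abs = new MetricValue(Integer.valueOf(42), MetricValue.ABSOLUTE);
+        MetricValue inc = new MetricValue(Integer.valueOf(5), MetricValue.INCREMENT);
+        if (inc.isIncrement()) {
+          int total = abs.getNumber().intValue() + inc.getNumber().intValue(); // 47
+        }
+ -->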
+ <!-- start class org.apache.hadoop.metrics.spi.NullContext -->
+ <class name="NullContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContext]]>
+ </doc>
+ </constructor>
+ <method name="startMonitoring"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Do-nothing version of startMonitoring]]>
+ </doc>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Null metrics context: a metrics context which does nothing. Used as the
+ default context, so that no performance data is emitted if no configuration
+ data is found.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContext -->
+ <!-- start class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <class name="NullContextWithUpdateThread" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullContextWithUpdateThread"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of NullContextWithUpdateThread]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/>
+ </method>
+ <method name="emitRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="contextName" type="java.lang.String"/>
+ <param name="recordName" type="java.lang.String"/>
+ <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/>
+ <doc>
+ <![CDATA[Do-nothing version of emitRecord]]>
+ </doc>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of update]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/>
+ <doc>
+ <![CDATA[Do-nothing version of remove]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A null context which has a thread that runs periodically when
+ monitoring is started. This keeps the data sampled
+ correctly.
+ In all other respects, this is like the null context: no data is emitted.
+ This is suitable for monitoring systems like JMX, which read the metrics
+ only when someone requests the data.
+
+ The default implementations of start and stop monitoring
+ in AbstractMetricsContext are good enough.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread -->
+ <!-- start class org.apache.hadoop.metrics.spi.OutputRecord -->
+ <class name="OutputRecord" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTagNames" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of tag names]]>
+ </doc>
+ </method>
+ <method name="getTag" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a tag object which can be a String, Integer, Short or Byte.
+
+ @return the tag value, or null if there is no such tag]]>
+ </doc>
+ </method>
+ <method name="getMetricNames" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the set of metric names.]]>
+ </doc>
+ </method>
+ <method name="getMetric" return="java.lang.Number"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the metric object which can be a Float, Integer, Short or Byte.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents a record of metric data to be sent to a metrics system.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.OutputRecord -->
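+ <!-- Illustrative only: a sketch of reading an OutputRecord, e.g. inside an emitRecord
+      implementation; the raw java.util.Set returns of this (pre-generics) API require
+      casts, and "outRec" is the hypothetical parameter name.
+
+        for (Object tagName : outRec.getTagNames()) {
+          Object tagValue = outRec.getTag((String) tagName);    // String, Integer, Short or Byte
+        }
+        for (Object metricName : outRec.getMetricNames()) {
+          Number value = outRec.getMetric((String) metricName); // Float, Integer, Short or Byte
+        }
+ -->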
+ <!-- start class org.apache.hadoop.metrics.spi.Util -->
+ <class name="Util" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="parse" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="specs" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Parses a space and/or comma separated sequence of server specifications
+ of the form <i>hostname</i> or <i>hostname:port</i>. If
+ the specs string is null, defaults to localhost:defaultPort.
+
+ @return a list of InetSocketAddress objects.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Static utility methods]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.spi.Util -->
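+ <!-- Illustrative only: a sketch of Util.parse; the host names are hypothetical.
+
+        // Two specs, one without a port: the default port 8020 fills the gap.
+        List addrs = Util.parse("nn1.example.com:9000, nn2.example.com", 8020);
+        // addrs holds InetSocketAddress objects for nn1:9000 and nn2:8020.
+        // A null specs string yields a single localhost:8020 entry.
+ -->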
+</package>
+<package name="org.apache.hadoop.metrics.util">
+ <!-- start class org.apache.hadoop.metrics.util.MBeanUtil -->
+ <class name="MBeanUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MBeanUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="registerMBean" return="javax.management.ObjectName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="serviceName" type="java.lang.String"/>
+ <param name="nameName" type="java.lang.String"/>
+ <param name="theMbean" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Register the MBean using our standard MBeanName format
+ "hadoop:service=<serviceName>,name=<nameName>",
+ where <serviceName> and <nameName> are the supplied parameters.
+
+ @param serviceName
+ @param nameName
+ @param theMbean - the MBean to register
+ @return the name used to register the MBean]]>
+ </doc>
+ </method>
+ <method name="unregisterMBean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mbeanName" type="javax.management.ObjectName"/>
+ </method>
+ <doc>
+ <![CDATA[This util class provides a method to register an MBean using
+ our standard naming convention as described in the doc
+ for {@link #registerMBean(String, String, Object)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MBeanUtil -->
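+ <!-- Illustrative only: registering and unregistering an MBean with the standard
+      "hadoop:service=...,name=..." naming; "ServiceFoo"/"TestStatistics" reuse the
+      example names from the MetricsDynamicMBeanBase doc below, and "theMBean" is
+      a hypothetical DynamicMBean instance.
+
+        ObjectName beanName = MBeanUtil.registerMBean("ServiceFoo", "TestStatistics", theMBean);
+        // ... later, on shutdown:
+        MBeanUtil.unregisterMBean(beanName);
+ -->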
+ <!-- start class org.apache.hadoop.metrics.util.MetricsBase -->
+ <class name="MetricsBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsBase" type="java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="MetricsBase" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="pushMetric"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDescription" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="NO_DESCRIPTION" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is the base class for all metrics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsBase -->
+ <!-- start class org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase -->
+ <class name="MetricsDynamicMBeanBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="javax.management.DynamicMBean"/>
+ <constructor name="MetricsDynamicMBeanBase" type="org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAttribute" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeName" type="java.lang.String"/>
+ <exception name="AttributeNotFoundException" type="javax.management.AttributeNotFoundException"/>
+ <exception name="MBeanException" type="javax.management.MBeanException"/>
+ <exception name="ReflectionException" type="javax.management.ReflectionException"/>
+ </method>
+ <method name="getAttributes" return="javax.management.AttributeList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributeNames" type="java.lang.String[]"/>
+ </method>
+ <method name="getMBeanInfo" return="javax.management.MBeanInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="invoke" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="actionName" type="java.lang.String"/>
+ <param name="parms" type="java.lang.Object[]"/>
+ <param name="signature" type="java.lang.String[]"/>
+ <exception name="MBeanException" type="javax.management.MBeanException"/>
+ <exception name="ReflectionException" type="javax.management.ReflectionException"/>
+ </method>
+ <method name="setAttribute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attribute" type="javax.management.Attribute"/>
+ <exception name="AttributeNotFoundException" type="javax.management.AttributeNotFoundException"/>
+ <exception name="InvalidAttributeValueException" type="javax.management.InvalidAttributeValueException"/>
+ <exception name="MBeanException" type="javax.management.MBeanException"/>
+ <exception name="ReflectionException" type="javax.management.ReflectionException"/>
+ </method>
+ <method name="setAttributes" return="javax.management.AttributeList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attributes" type="javax.management.AttributeList"/>
+ </method>
+ <doc>
+ <![CDATA[This abstract base class facilitates creating dynamic MBeans automatically from
+ metrics.
+ The metrics constructors register metrics in a registry.
+ Different categories of metrics should be in different classes with their own
+ registry (as in NameNodeMetrics and DataNodeMetrics).
+ Then the MBean can be created by passing the registry to the constructor.
+ The MBean should then be registered using an MBean name, for example:
+ MetricsHolder myMetrics = new MetricsHolder(); // has metrics and registry
+ MetricsTestMBean theMBean = new MetricsTestMBean(myMetrics.mregistry);
+ ObjectName mbeanName = MBeanUtil.registerMBean("ServiceFoo",
+ "TestStatistics", theMBean);]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase -->
+ <!-- start class org.apache.hadoop.metrics.util.MetricsIntValue -->
+ <class name="MetricsIntValue" extends="org.apache.hadoop.metrics.util.MetricsBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsIntValue" type="java.lang.String, org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name used to publish the metric
+ @param registry - where the metrics object will be registered
+ @param description - the description]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsIntValue" type="java.lang.String, org.apache.hadoop.metrics.util.MetricsRegistry"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name used to publish the metric
+ @param registry - where the metrics object will be registered
+ A description of {@link #NO_DESCRIPTION} is used]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="int"/>
+ <doc>
+ <![CDATA[Set the value
+ @param newValue]]>
+ </doc>
+ </method>
+ <method name="get" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the mr.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsIntValue class is for a metric that is not time-varying
+ but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsIntValue -->
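+ <!-- Illustrative only: a sketch of a set-style metric; the name "capacity" and the
+      registry wiring are hypothetical.
+
+        MetricsRegistry registry = new MetricsRegistry();
+        MetricsIntValue capacity = new MetricsIntValue("capacity", registry);
+        capacity.set(42);             // published once, at the next update call
+        int current = capacity.get(); // what JMX reads
+ -->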
+ <!-- start class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <class name="MetricsLongValue" extends="org.apache.hadoop.metrics.util.MetricsBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsLongValue" type="java.lang.String, org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name used to publish the metric
+ @param registry - where the metrics object will be registered
+ @param description - the description]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsLongValue" type="java.lang.String, org.apache.hadoop.metrics.util.MetricsRegistry"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name used to publish the metric
+ @param registry - where the metrics object will be registered
+ A description of {@link #NO_DESCRIPTION} is used]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="long"/>
+ <doc>
+ <![CDATA[Set the value
+ @param newValue]]>
+ </doc>
+ </method>
+ <method name="get" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get value
+ @return the value last set]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the metric to the mr.
+ The metric is pushed only if it was updated since the last push.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #get()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsLongValue class is for a metric that is not time-varying
+ but changes only when it is set.
+ Each time its value is set, it is published only *once* at the next update
+ call.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsLongValue -->
+ <!-- start class org.apache.hadoop.metrics.util.MetricsRegistry -->
+ <class name="MetricsRegistry" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsRegistry"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return number of metrics in the registry]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricsName" type="java.lang.String"/>
+ <param name="theMetricsObj" type="org.apache.hadoop.metrics.util.MetricsBase"/>
+ <doc>
+ <![CDATA[Add a new metric to the registry
+ @param metricsName - the name
+ @param theMetricsObj - the metric object
+ @throws IllegalArgumentException if the name is already registered]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.metrics.util.MetricsBase"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="metricsName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[@param metricsName
+ @return the metric if one is registered under the supplied name,
+ or null if none is registered]]>
+ </doc>
+ </method>
+ <method name="getKeyList" return="java.util.Collection"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the list of metric names]]>
+ </doc>
+ </method>
+ <method name="getMetricsList" return="java.util.Collection"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the list of metrics]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the registry for metrics.
+ A related set of metrics should be declared in a holding class and registered
+ in a registry for those metrics, which is also stored in the holding class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsRegistry -->
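+ <!-- Illustrative only: a sketch of registry usage. It assumes the metric constructors
+      in this package register themselves with the registry passed to them (suggested,
+      but not stated, by the constructor signatures above); "ops" is a hypothetical name.
+
+        MetricsRegistry registry = new MetricsRegistry();
+        MetricsTimeVaryingInt ops = new MetricsTimeVaryingInt("ops", registry);
+        MetricsBase same = registry.get("ops"); // the metric just created, or null if absent
+        int count = registry.size();            // 1
+ -->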
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
+ <class name="MetricsTimeVaryingInt" extends="org.apache.hadoop.metrics.util.MetricsBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingInt" type="java.lang.String, org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric
+ @param registry - where the metrics object will be registered
+ @param description - the description]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsTimeVaryingInt" type="java.lang.String, org.apache.hadoop.metrics.util.MetricsRegistry"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name of the metrics to be used to publish the metric
+ @param registry - where the metrics object will be registered
+ A description of {@link #NO_DESCRIPTION} is used]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="int"/>
+ <doc>
+ <![CDATA[Increment the metric by the incr value
+ @param incr - number of operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Increment the metric by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalValue()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value at the previous interval
+ @return the previous interval value]]>
+ </doc>
+ </method>
+ <method name="getCurrentIntervalValue" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value at the current interval
+ @return the current interval value]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingInt class is for a metric that naturally
+ varies over time (e.g. number of files created). The metric is accumulated
+ over an interval (set in the metrics config file); the metric is
+ published at the end of each interval and then
+ reset to zero. Hence the counter has the value of the current interval.
+
+ Note: if one wants a time associated with the metric, use
+ {@link org.apache.hadoop.metrics.util.MetricsTimeVaryingRate} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt -->
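+ <!-- Illustrative only: a sketch of an interval counter; the metric name, registry and
+      record variables are hypothetical.
+
+        MetricsTimeVaryingInt filesCreated = new MetricsTimeVaryingInt("filesCreated", registry);
+        filesCreated.inc();              // +1 within the current interval
+        filesCreated.inc(4);             // +4 within the current interval
+        filesCreated.pushMetric(record); // pushes the delta of 5, then resets to zero
+        int last = filesCreated.getPreviousIntervalValue(); // 5
+ -->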
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingLong -->
+ <class name="MetricsTimeVaryingLong" extends="org.apache.hadoop.metrics.util.MetricsBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingLong" type="java.lang.String, org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name used to publish the metric
+ @param registry - where the metrics object will be registered
+ @param description - the description]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsTimeVaryingLong" type="java.lang.String, org.apache.hadoop.metrics.util.MetricsRegistry"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name used to publish the metric
+ @param registry - where the metrics object will be registered
+ A description of {@link #NO_DESCRIPTION} is used]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Increment the metric by the incr value
+ @param incr - number of operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Increment the metric by one]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalValue()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalValue" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value at the previous interval
+ @return the previous interval value]]>
+ </doc>
+ </method>
+ <method name="getCurrentIntervalValue" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The value at the current interval
+ @return the current interval value]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingLong class is for a metric that naturally
+ varies over time (e.g. number of files created). The metric is accumulated
+ over an interval (set in the metrics config file); the metric is
+ published at the end of each interval and then
+ reset to zero. Hence the counter has the value of the current interval.
+
+ Note: if one wants a time associated with the metric, use
+ {@link org.apache.hadoop.metrics.util.MetricsTimeVaryingRate} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingLong -->
+ <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
+ <class name="MetricsTimeVaryingRate" extends="org.apache.hadoop.metrics.util.MetricsBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MetricsTimeVaryingRate" type="java.lang.String, org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name used to publish the metric
+ @param registry - where the metrics object will be registered
+ @param description - the description]]>
+ </doc>
+ </constructor>
+ <constructor name="MetricsTimeVaryingRate" type="java.lang.String, org.apache.hadoop.metrics.util.MetricsRegistry"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor - create a new metric
+ @param nam the name used to publish the metric
+ @param registry - where the metrics object will be registered
+ A description of {@link #NO_DESCRIPTION} is used]]>
+ </doc>
+ </constructor>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numOps" type="int"/>
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metric for numOps operations
+ @param numOps - number of operations
+ @param time - the time taken for the numOps operations]]>
+ </doc>
+ </method>
+ <method name="inc"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="time" type="long"/>
+ <doc>
+ <![CDATA[Increment the metric for one operation
+ @param time the time taken for the operation]]>
+ </doc>
+ </method>
+ <method name="pushMetric"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/>
+ <doc>
+ <![CDATA[Push the delta metrics to the mr.
+ The delta is since the last push/interval.
+
+ Note this does NOT push to JMX
+ (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and
+ {@link #getPreviousIntervalNumOps()}).
+
+ @param mr]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalNumOps" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of operations in the previous interval
+ @return - ops in prev interval]]>
+ </doc>
+ </method>
+ <method name="getPreviousIntervalAverageTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The average time per operation in the previous interval
+ @return - the average time per operation.]]>
+ </doc>
+ </method>
+ <method name="getMinTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The min time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return min time for an operation]]>
+ </doc>
+ </method>
+ <method name="getMaxTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The max time for a single operation since the last reset
+ {@link #resetMinMax()}
+ @return max time for an operation]]>
+ </doc>
+ </method>
+ <method name="resetMinMax"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the min max values]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MetricsTimeVaryingRate class is for a rate-based metric that
+ naturally varies over time (e.g. time taken to create a file).
+ The rate is averaged at each interval heartbeat (the interval
+ is set in the metrics config file).
+ This class also keeps track of the min and max operation times along with
+ a method to reset the min-max.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate -->
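+ <!-- Illustrative only: timing an operation with a rate metric; the metric name and
+      registry are hypothetical.
+
+        MetricsTimeVaryingRate createFileTime = new MetricsTimeVaryingRate("createFile", registry);
+        long start = System.currentTimeMillis();
+        // ... perform the operation ...
+        createFileTime.inc(System.currentTimeMillis() - start); // one op and its elapsed time
+        long worst = createFileTime.getMaxTime(); // max single-op time since the last resetMinMax()
+ -->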
+</package>
+<package name="org.apache.hadoop.net">
+ <!-- start class org.apache.hadoop.net.CachedDNSToSwitchMapping -->
+ <class name="CachedDNSToSwitchMapping" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.DNSToSwitchMapping"/>
+ <constructor name="CachedDNSToSwitchMapping" type="org.apache.hadoop.net.DNSToSwitchMapping"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="resolve" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List"/>
+ </method>
+ <field name="rawMapping" type="org.apache.hadoop.net.DNSToSwitchMapping"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A cached implementation of DNSToSwitchMapping that takes a
+ raw DNSToSwitchMapping and stores the resolved network locations in
+ a cache. Subsequent calls for a resolved network location
+ will get the location from the cache.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.CachedDNSToSwitchMapping -->
+ <!-- start class org.apache.hadoop.net.DNS -->
+ <class name="DNS" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DNS"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reverseDns" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hostIp" type="java.net.InetAddress"/>
+ <param name="ns" type="java.lang.String"/>
+ <exception name="NamingException" type="javax.naming.NamingException"/>
+ <doc>
+ <![CDATA[Returns the hostname associated with the specified IP address by the
+ provided nameserver.
+
+ @param hostIp
+ The address to reverse lookup
+ @param ns
+ The host name of a reachable DNS server
+ @return The host name associated with the provided IP
+ @throws NamingException
+ If a NamingException is encountered]]>
+ </doc>
+ </method>
+ <method name="getIPs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the IPs associated with the provided interface, if any, in
+ textual form.
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return A string vector of all the IPs associated with the provided
+ interface
+ @throws UnknownHostException
+ If an UnknownHostException is encountered in querying the
+ default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultIP" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the first available IP address associated with the provided
+ network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The IP address in text form
+ @throws UnknownHostException
+ If one is encountered in querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the provided nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return A string vector of all host names associated with the IPs tied to
+ the specified interface
+ @throws UnknownHostException]]>
+ </doc>
+ </method>
+ <method name="getHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns all the host names associated by the default nameserver with the
+ address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The list of host names associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <param name="nameserver" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the provided
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @param nameserver
+ The DNS host name
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <method name="getDefaultHost" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strInterface" type="java.lang.String"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ <doc>
+ <![CDATA[Returns the default (first) host name associated by the default
+ nameserver with the address bound to the specified network interface
+
+ @param strInterface
+ The name of the network interface to query (e.g. eth0)
+ @return The default host name associated with IPs bound to the network
+ interface
+ @throws UnknownHostException
+ If one is encountered while querying the default interface]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides direct and reverse lookup functionalities, allowing
+ the querying of specific network interfaces or nameservers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.DNS -->
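+ <!-- Illustrative only: a sketch of the DNS helpers; "eth0" is an example interface
+      name, and both calls throw UnknownHostException on failure (handling omitted).
+
+        String host = DNS.getDefaultHost("eth0"); // first host name on eth0, default nameserver
+        String[] ips = DNS.getIPs("eth0");        // all IPs bound to eth0, in textual form
+ -->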
+ <!-- start interface org.apache.hadoop.net.DNSToSwitchMapping -->
+ <interface name="DNSToSwitchMapping" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="resolve" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.List"/>
+ <doc>
+ <![CDATA[Resolves a list of DNS-names/IP-addresses and returns a list of
+ switch information (network paths). One-to-one correspondence must be
+ maintained between the elements in the lists.
+ Consider an element in the argument list - x.y.com. The switch information
+ that is returned must be a network path of the form /foo/rack,
+ where / is the root, and 'foo' is the switch where 'rack' is connected.
+ Note the hostname/ip-address is not part of the returned path.
+ The network topology of the cluster would determine the number of
+ components in the network path.
+ @param names
+ @return list of resolved network paths]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An interface that should be implemented to allow pluggable
+ DNS-name/IP-address to RackID resolvers.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.DNSToSwitchMapping -->
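+ <!-- Illustrative only: a minimal resolver that places every host in one rack. The
+      class name and the path "/default-rack" are examples, not mandated by the
+      interface; assumes java.util.List and java.util.ArrayList imports.
+
+        public class FlatMapping implements DNSToSwitchMapping {
+          public List resolve(List names) {
+            List paths = new ArrayList(names.size());
+            for (int i = 0; i < names.size(); i++) {
+              paths.add("/default-rack"); // one network path per input name, in order
+            }
+            return paths;
+          }
+        }
+ -->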
+ <!-- start class org.apache.hadoop.net.NetUtils -->
+ <class name="NetUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="clazz" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the socket factory for the given class according to its
+ configuration parameter
+ <tt>hadoop.rpc.socket.factory.class.&lt;ClassName&gt;</tt>. When no
+ such parameter exists then fall back on the default socket factory as
+ configured by <tt>hadoop.rpc.socket.factory.class.default</tt>. If
+ this default socket factory is not configured, then fall back on the JVM
+ default socket factory.
+
+ @param conf the configuration
+ @param clazz the class (usually a {@link VersionedProtocol})
+ @return a socket factory]]>
+ </doc>
+ </method>
+ <method name="getDefaultSocketFactory" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the default socket factory as specified by the configuration
+ parameter <tt>hadoop.rpc.socket.factory.class.default</tt>
+
+ @param conf the configuration
+ @return the default socket factory as specified in the configuration or
+ the JVM default socket factory if the configuration does not
+ contain a default socket factory property.]]>
+ </doc>
+ </method>
+ <method name="getSocketFactoryFromProperty" return="javax.net.SocketFactory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="propValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the socket factory corresponding to the given proxy URI. If the
+ given proxy URI corresponds to an absence of configuration parameter,
+ returns null. If the URI is malformed, an exception is raised.
+
+ @param propValue the property which is the class name of the
+ SocketFactory to instantiate; assumed non null and non empty.
+ @return a socket factory as defined in the property value.]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="createSocketAddr" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="target" type="java.lang.String"/>
+ <param name="defaultPort" type="int"/>
+ <doc>
+ <![CDATA[Util method to build socket addr from either:
+ <host>
+ <host>:<port>
+ <fs>://<host>:<port>/<path>]]>
+ </doc>
+ </method>
+ <method name="getServerAddress" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="oldBindAddressName" type="java.lang.String"/>
+ <param name="oldPortName" type="java.lang.String"/>
+ <param name="newBindAddressName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Handle the transition from pairs of attributes specifying a host and port
+ to a single colon separated one.
+ @param conf the configuration to check
+ @param oldBindAddressName the old address attribute name
+ @param oldPortName the old port attribute name
+ @param newBindAddressName the new combined name
+ @return the complete address from the configuration]]>
+ </doc>
+ </method>
+ <method name="addStaticResolution"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="resolvedName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a static resolution for host. This can be used for setting up
+ fake hostnames that point to a well-known host. For example,
+ some test cases require daemons with different hostnames
+ running on the same machine. In order to create connections to these
+ daemons, one can set up mappings from those hostnames to "localhost".
+ {@link NetUtils#getStaticResolution(String)} can be used to query for
+ the actual hostname.
+ @param host
+ @param resolvedName]]>
+ </doc>
+ </method>
+ <method name="getStaticResolution" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Retrieves the resolved name for the passed host. The resolved name must
+ have been set earlier using
+ {@link NetUtils#addStaticResolution(String, String)}
+ @param host
+ @return the resolution]]>
+ </doc>
+ </method>
+ <method name="getAllStaticResolutions" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is used to get all the resolutions that were added using
+ {@link NetUtils#addStaticResolution(String, String)}. The return
+ value is a List, each element of which is an array of String
+ of the form String[0]=hostname, String[1]=resolved-hostname.
+ @return the list of resolutions]]>
+ </doc>
+ </method>
+ <method name="getConnectAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="server" type="org.apache.hadoop.ipc.Server"/>
+ <doc>
+ <![CDATA[Returns InetSocketAddress that a client can use to
+ connect to the server. Server.getListenerAddress() is not correct when
+ the server binds to "0.0.0.0". This returns "127.0.0.1:port" when
+ the getListenerAddress() returns "0.0.0.0:port".
+
+ @param server
+ @return socket address that a client can use to connect to the server.]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getInputStream(socket, socket.getSoTimeout()).<br><br>
+
+ From documentation for {@link #getInputStream(Socket, long)}:<br>
+ Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see #getInputStream(Socket, long)
+
+ @param socket
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getInputStream" return="java.io.InputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns InputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketInputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getInputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the timeout set with
+ {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getInputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. zero
+ for waiting as long as necessary.
+ @return InputStream for reading from the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as getOutputStream(socket, 0). Timeout of zero implies write will
+ wait until data is available.<br><br>
+
+ From documentation for {@link #getOutputStream(Socket, long)} : <br>
+ Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ data is available.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getOutputStream()}.
+
+ @see #getOutputStream(Socket, long)
+
+ @param socket
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputStream" return="java.io.OutputStream"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="timeout" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns OutputStream for the socket. If the socket has an associated
+ SocketChannel then it returns a
+ {@link SocketOutputStream} with the given timeout. If the socket does not
+ have a channel, {@link Socket#getOutputStream()} is returned. In the latter
+ case, the timeout argument is ignored and the write will wait until
+ data is available.<br><br>
+
+ Any socket created using socket factories returned by {@link NetUtils}
+ must use this interface instead of {@link Socket#getOutputStream()}.
+
+ @see Socket#getChannel()
+
+ @param socket
+ @param timeout timeout in milliseconds. This may not always apply. zero
+ for waiting as long as necessary.
+ @return OutputStream for writing to the socket.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="connect"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="socket" type="java.net.Socket"/>
+ <param name="endpoint" type="java.net.SocketAddress"/>
+ <param name="timeout" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is a drop-in replacement for
+ {@link Socket#connect(SocketAddress, int)}.
+ In the case of normal sockets that don't have associated channels, this
+ just invokes <code>socket.connect(endpoint, timeout)</code>. If
+ <code>socket.getChannel()</code> returns a non-null channel,
+ connect is implemented using Hadoop's selectors. This is done mainly
+ to prevent Sun's connect implementation from creating thread-local
+ selectors, since Hadoop has no control over when these are closed
+ and they could end up consuming all the available file descriptors.
+
+ @see java.net.Socket#connect(java.net.SocketAddress, int)
+
+ @param socket
+ @param endpoint
+ @param timeout - timeout in milliseconds]]>
+ </doc>
+ </method>
+ <method name="normalizeHostName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a string representation of a host, return its IP address
+ in textual form.
+
+ @param name a string representation of a host:
+ either a textual representation of its IP address or its host name
+ @return its IP address in string format]]>
+ </doc>
+ </method>
+ <method name="normalizeHostNames" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="names" type="java.util.Collection"/>
+ <doc>
+ <![CDATA[Given a collection of string representation of hosts, return a list of
+ corresponding IP addresses in the textual representation.
+
+ @param names a collection of string representations of hosts
+ @return a list of corresponding IP addresses in the string format
+ @see #normalizeHostName(String)]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetUtils -->
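+ <!-- Editor's note: a minimal Java usage sketch (added for illustration, not
+      generated JDiff output) of how the NetUtils helpers documented above fit
+      together; conf is assumed to be a Configuration, and the host/port values
+      are placeholders:
+
+        Socket socket = NetUtils.getDefaultSocketFactory(conf).createSocket();
+        NetUtils.connect(socket, new InetSocketAddress("datanode1", 50010), 10000);
+        InputStream in   = NetUtils.getInputStream(socket, 10000);
+        OutputStream out = NetUtils.getOutputStream(socket, 10000);
+
+      Using NetUtils.getInputStream/getOutputStream rather than the raw Socket
+      streams is what makes the per-read and per-write timeouts apply when the
+      socket has an associated channel.
+ -->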
+ <!-- start class org.apache.hadoop.net.NetworkTopology -->
+ <class name="NetworkTopology" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NetworkTopology"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Add a leaf node.
+ Updates the node counter and the rack counter if necessary.
+ @param node
+ node to be added
+ @exception IllegalArgumentException if the node is added under a leaf,
+ or the node to be added is not a leaf]]>
+ </doc>
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Remove a node.
+ Updates the node counter and the rack counter if necessary.
+ @param node
+ node to be removed]]>
+ </doc>
+ </method>
+ <method name="contains" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if the tree contains node <i>node</i>
+
+ @param node
+ a node
+ @return true if <i>node</i> is already in the tree; false otherwise]]>
+ </doc>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="loc" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a string representation of a node, return its reference
+
+ @param loc
+ a path-like string representation of a node
+ @return a reference to the node; null if the node is not in the tree]]>
+ </doc>
+ </method>
+ <method name="getNumOfRacks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of racks]]>
+ </doc>
+ </method>
+ <method name="getNumOfLeaves" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the total number of nodes]]>
+ </doc>
+ </method>
+ <method name="getDistance" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return the distance between two nodes.
+ It is assumed that the distance from a node to its parent is 1.
+ The distance between two nodes is calculated by summing their distances
+ to their closest common ancestor.
+ @param node1 one node
+ @param node2 another node
+ @return the distance between node1 and node2
+ @exception IllegalArgumentException when node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="isOnSameRack" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node1" type="org.apache.hadoop.net.Node"/>
+ <param name="node2" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Check if two nodes are on the same rack.
+ @param node1 one node
+ @param node2 another node
+ @return true if node1 and node2 are on the same rack; false otherwise
+ @exception IllegalArgumentException when either node1 or node2 is null, or
+ node1 or node2 do not belong to the cluster]]>
+ </doc>
+ </method>
+ <method name="chooseRandom" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Randomly choose one node from <i>scope</i>.
+ If scope starts with ~, choose one from all nodes except for the
+ ones in <i>scope</i>; otherwise, choose one from <i>scope</i>.
+ @param scope range of nodes from which a node will be chosen
+ @return the chosen node]]>
+ </doc>
+ </method>
+ <method name="countNumOfAvailableNodes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="scope" type="java.lang.String"/>
+ <param name="excludedNodes" type="java.util.List"/>
+ <doc>
+ <![CDATA[Return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i>.
+ If scope starts with ~, return the number of nodes that are not
+ in <i>scope</i> and not in <i>excludedNodes</i>.
+ @param scope a path string that may start with ~
+ @param excludedNodes a list of nodes
+ @return number of available nodes]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert the network tree to a string.]]>
+ </doc>
+ </method>
+ <method name="pseudoSortByDistance"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reader" type="org.apache.hadoop.net.Node"/>
+ <param name="nodes" type="org.apache.hadoop.net.Node[]"/>
+ <doc>
+ <![CDATA[Sort the nodes array by their distances to <i>reader</i>.
+ It linearly scans the array: if a local node is found, it is swapped with
+ the first element of the array.
+ If a local-rack node is found, it is swapped with the first element following
+ the local node.
+ If neither a local node nor a local-rack node is found, a random replica
+ location is placed at position 0.
+ The rest of the nodes are left untouched.]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_RACK" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_HOST_LEVEL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The class represents a cluster of computers with a tree-shaped
+ hierarchical network topology.
+ For example, a cluster may consist of many data centers filled
+ with racks of computers.
+ In a network topology, leaves represent data nodes (computers) and inner
+ nodes represent switches/routers that manage traffic in/out of data centers
+ or racks.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NetworkTopology -->
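+ <!-- Editor's note: a small usage sketch (added for illustration, with made-up
+      host and rack names) of the NetworkTopology API described above:
+
+        NetworkTopology topology = new NetworkTopology();
+        Node n1 = new NodeBase("h1:50010", "/dc1/rack1");
+        Node n2 = new NodeBase("h2:50010", "/dc1/rack1");
+        Node n3 = new NodeBase("h3:50010", "/dc1/rack2");
+        topology.add(n1); topology.add(n2); topology.add(n3);
+
+        topology.getDistance(n1, n2);   // 2: same rack, via the shared rack node
+        topology.getDistance(n1, n3);   // 4: same data center, different racks
+        topology.isOnSameRack(n1, n3);  // false
+        topology.chooseRandom("/dc1/rack1");  // a random node from rack1
+ -->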
+ <!-- start interface org.apache.hadoop.net.Node -->
+ <interface name="Node" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the string representation of this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the node's network location]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The interface defines a node in a network topology.
+ A node may be a leaf representing a data node or an inner
+ node representing a datacenter or rack.
+ Each node has a name, and its location in the network is
+ described by a string with syntax similar to a file name.
+ For example, a data node's name is hostname:port# and, if it is located at
+ rack "orange" in datacenter "dog", the string representation of its
+ network location is /dog/orange]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.net.Node -->
+ <!-- start class org.apache.hadoop.net.NodeBase -->
+ <class name="NodeBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.net.Node"/>
+ <constructor name="NodeBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its path.
+ @param path
+ a concatenation of this node's location, the path separator, and its name]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location]]>
+ </doc>
+ </constructor>
+ <constructor name="NodeBase" type="java.lang.String, java.lang.String, org.apache.hadoop.net.Node, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a node from its name and its location
+ @param name this node's name
+ @param location this node's location
+ @param parent this node's parent node
+ @param level this node's level in the tree]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's name]]>
+ </doc>
+ </method>
+ <method name="getNetworkLocation" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's network location]]>
+ </doc>
+ </method>
+ <method name="setNetworkLocation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="location" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set this node's network location]]>
+ </doc>
+ </method>
+ <method name="getPath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Return this node's path]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's string representation]]>
+ </doc>
+ </method>
+ <method name="normalize" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Normalize a path]]>
+ </doc>
+ </method>
+ <method name="getParent" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's parent]]>
+ </doc>
+ </method>
+ <method name="setParent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="parent" type="org.apache.hadoop.net.Node"/>
+ <doc>
+ <![CDATA[Set this node's parent]]>
+ </doc>
+ </method>
+ <method name="getLevel" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return this node's level in the tree.
+ E.g. the root of a tree returns 0 and its children return 1]]>
+ </doc>
+ </method>
+ <method name="setLevel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="level" type="int"/>
+ <doc>
+ <![CDATA[Set this node's level in the tree]]>
+ </doc>
+ </method>
+ <field name="PATH_SEPARATOR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PATH_SEPARATOR_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ROOT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="name" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="location" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="level" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="parent" type="org.apache.hadoop.net.Node"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class that implements interface Node]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.NodeBase -->
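+ <!-- Editor's note: an added illustration of the NodeBase path convention
+      described above (the node names are hypothetical):
+
+        // these two are equivalent; PATH_SEPARATOR is '/'
+        Node a = new NodeBase("/dog/orange/host1:50010");
+        Node b = new NodeBase("host1:50010", "/dog/orange");
+
+        a.getName();             // "host1:50010"
+        a.getNetworkLocation();  // "/dog/orange"
+        NodeBase.getPath(a);     // "/dog/orange/host1:50010"
+ -->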
+ <!-- start class org.apache.hadoop.net.ScriptBasedMapping -->
+ <class name="ScriptBasedMapping" extends="org.apache.hadoop.net.CachedDNSToSwitchMapping"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ScriptBasedMapping"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ScriptBasedMapping" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[This class implements the {@link DNSToSwitchMapping} interface using a
+ script configured via topology.script.file.name.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.ScriptBasedMapping -->
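+ <!-- Editor's note: a configuration sketch (added; the script path and host
+      names are placeholders) showing how ScriptBasedMapping is typically wired
+      up through the configuration key mentioned above:
+
+        Configuration conf = new Configuration();
+        conf.set("topology.script.file.name", "/etc/hadoop/topology.sh");
+        DNSToSwitchMapping mapping = new ScriptBasedMapping(conf);
+        List<String> racks = mapping.resolve(Arrays.asList("h1", "h2"));
+        // e.g. ["/dc1/rack1", "/dc1/rack2"]; with no script configured, every
+        // host resolves to NetworkTopology.DEFAULT_RACK
+ -->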
+ <!-- start class org.apache.hadoop.net.SocketInputStream -->
+ <class name="SocketInputStream" extends="java.io.InputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.ReadableByteChannel"/>
+ <constructor name="SocketInputStream" type="java.nio.channels.ReadableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for reading, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds. must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), timeout):<br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketInputStream" type="java.net.Socket"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketInputStream(socket.getChannel(), socket.getSoTimeout()):<br><br>
+
+ Create a new input stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+ @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.ReadableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this input stream.
+ This is useful in certain cases, such as obtaining the channel for
+ {@link FileChannel#transferFrom(ReadableByteChannel, long, long)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="read" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForReadable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for reading.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an input stream that can have a timeout while reading.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} for the associated socket will throw
+ an IllegalBlockingModeException.
+ Please use {@link SocketOutputStream} for writing.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketInputStream -->
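+ <!-- Editor's note: an added sketch of a timed read via SocketInputStream. It
+      assumes the socket was created through a channel-backed factory (e.g. via
+      NetUtils); per the constructor docs above, the socket must have an
+      associated channel:
+
+        InputStream in = new SocketInputStream(socket, 5000);
+        byte[] buf = new byte[4096];
+        int n = in.read(buf, 0, buf.length);  // throws SocketTimeoutException
+                                              // if no data arrives within 5s
+ -->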
+ <!-- start class org.apache.hadoop.net.SocketOutputStream -->
+ <class name="SocketOutputStream" extends="java.io.OutputStream"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.nio.channels.WritableByteChannel"/>
+ <constructor name="SocketOutputStream" type="java.nio.channels.WritableByteChannel, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @param channel
+ Channel for writing, should also be a {@link SelectableChannel}.
+ The channel will be configured to be non-blocking.
+ @param timeout timeout in milliseconds. must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="SocketOutputStream" type="java.net.Socket, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Same as SocketOutputStream(socket.getChannel(), timeout):<br><br>
+
+ Create a new output stream with the given timeout. If the timeout
+ is zero, it will be treated as infinite timeout. The socket's
+ channel will be configured to be non-blocking.
+
+ @see SocketOutputStream#SocketOutputStream(WritableByteChannel, long)
+
+ @param socket should have a channel associated with it.
+ @param timeout timeout in milliseconds. Must not be negative.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getChannel" return="java.nio.channels.WritableByteChannel"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the underlying channel used by this stream.
+ This is useful in certain cases, such as obtaining the channel for
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.]]>
+ </doc>
+ </method>
+ <method name="isOpen" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="java.nio.ByteBuffer"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="waitForWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Waits for the underlying channel to be ready for writing.
+ The timeout specified for this stream applies to this wait.
+
+ @throws SocketTimeoutException
+ if select on the channel times out.
+ @throws IOException
+ if any other I/O error occurs.]]>
+ </doc>
+ </method>
+ <method name="transferToFully"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fileCh" type="java.nio.channels.FileChannel"/>
+ <param name="position" type="long"/>
+ <param name="count" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Transfers data from FileChannel using
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.
+
+ Similar to readFully(), this waits until the requested amount of
+ data is transferred.
+
+ @param fileCh FileChannel to transfer data from.
+ @param position position within the channel where the transfer begins
+ @param count number of bytes to transfer.
+
+ @throws EOFException
+ If the end of the input file is reached before the requested number of
+ bytes are transferred.
+
+ @throws SocketTimeoutException
+ If this channel blocks transfer longer than timeout for
+ this stream.
+
+ @throws IOException Includes any exception thrown by
+ {@link FileChannel#transferTo(long, long, WritableByteChannel)}.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This implements an output stream that can have a timeout while writing.
+ This sets the non-blocking flag on the socket channel.
+ So after creating this object, read() on
+ {@link Socket#getInputStream()} and write() on
+ {@link Socket#getOutputStream()} on the associated socket will throw
+ an IllegalBlockingModeException.
+ Please use {@link SocketInputStream} for reading.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocketOutputStream -->
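+ <!-- Editor's note: an added sketch of a timed write plus transferToFully,
+      as described above (the socket, header bytes, FileChannel, and length are
+      assumed to exist already):
+
+        SocketOutputStream out = new SocketOutputStream(socket, 5000);
+        out.write(header, 0, header.length);           // times out after 5s
+        out.transferToFully(fileChannel, 0, blockLen);  // zero-copy transfer;
+                                                        // waits until all
+                                                        // blockLen bytes are sent
+ -->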
+ <!-- start class org.apache.hadoop.net.SocksSocketFactory -->
+ <class name="SocksSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="SocksSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <constructor name="SocksSocketFactory" type="java.net.Proxy"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor with a supplied Proxy
+
+ @param proxy the proxy to use to create sockets]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create sockets with a SOCKS proxy]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.SocksSocketFactory -->
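+ <!-- Editor's note: SOCKS support is normally enabled through configuration
+      rather than by instantiating the factory directly. A sketch (the keys
+      below are believed to match Hadoop 0.20's core-default.xml, and the proxy
+      address is a placeholder):
+
+        conf.set("hadoop.rpc.socket.factory.class.default",
+                 "org.apache.hadoop.net.SocksSocketFactory");
+        conf.set("hadoop.socks.server", "socks-gw:1080");
+ -->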
+ <!-- start class org.apache.hadoop.net.StandardSocketFactory -->
+ <class name="StandardSocketFactory" extends="javax.net.SocketFactory"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StandardSocketFactory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default empty constructor (for use with the reflection API).]]>
+ </doc>
+ </constructor>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="addr" type="java.net.InetAddress"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="createSocket" return="java.net.Socket"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="host" type="java.lang.String"/>
+ <param name="port" type="int"/>
+ <param name="localHostAddr" type="java.net.InetAddress"/>
+ <param name="localPort" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Specialized SocketFactory to create standard (non-proxied) sockets]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.net.StandardSocketFactory -->
+</package>
+<package name="org.apache.hadoop.record">
+ <!-- start class org.apache.hadoop.record.BinaryRecordInput -->
+ <class name="BinaryRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="BinaryRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordInput" type="java.io.DataInput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inp" type="java.io.DataInput"/>
+ <doc>
+ <![CDATA[Get a thread-local record input for the supplied DataInput.
+ @param inp data input stream
+ @return binary record input corresponding to the supplied DataInput.]]>
+ </doc>
+ </method>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordInput -->
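+ <!-- Editor's note: an added deserialization sketch for the thread-local get()
+      pattern described above. MyRecord is hypothetical and stands in for any
+      class generated by the Hadoop record compiler (rcc):
+
+        DataInputStream dis = new DataInputStream(inputStream);
+        BinaryRecordInput rin = BinaryRecordInput.get(dis); // thread-local reuse
+        MyRecord rec = new MyRecord();
+        rec.deserialize(rin, "rec");
+ -->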
+ <!-- start class org.apache.hadoop.record.BinaryRecordOutput -->
+ <class name="BinaryRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="BinaryRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <constructor name="BinaryRecordOutput" type="java.io.DataOutput"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of BinaryRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="get" return="org.apache.hadoop.record.BinaryRecordOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <doc>
+ <![CDATA[Get a thread-local record output for the supplied DataOutput.
+ @param out data output stream
+ @return binary record output corresponding to the supplied DataOutput.]]>
+ </doc>
+ </method>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.BinaryRecordOutput -->
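+ <!-- Editor's note: the matching serialization sketch (MyRecord again stands
+      in for a hypothetical rcc-generated record class):
+
+        DataOutputStream dos = new DataOutputStream(outputStream);
+        BinaryRecordOutput rout = BinaryRecordOutput.get(dos); // thread-local
+        MyRecord rec = new MyRecord();
+        rec.serialize(rout, "rec");   // writes the fields in binary format
+ -->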
+ <!-- start class org.apache.hadoop.record.Buffer -->
+ <class name="Buffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Buffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a zero-count sequence.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte array as the initial value.
+
+ @param bytes This array becomes the backing storage for the object.]]>
+ </doc>
+ </constructor>
+ <constructor name="Buffer" type="byte[], int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a Buffer using the byte range as the initial value.
+
+ @param bytes A copy of this array becomes the backing storage for the object.
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Use the specified byte array as the underlying sequence.
+
+ @param bytes byte sequence]]>
+ </doc>
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Copy the specified byte array to the Buffer. Replaces the current buffer.
+
+ @param bytes byte array to be assigned
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="get" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the data from the Buffer.
+
+ @return The data is only valid between 0 and getCount() - 1.]]>
+ </doc>
+ </method>
+ <method name="getCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current count of the buffer.]]>
+ </doc>
+ </method>
+ <method name="getCapacity" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the capacity, which is the maximum count that could be handled without
+ resizing the backing storage.
+
+ @return The number of bytes]]>
+ </doc>
+ </method>
+ <method name="setCapacity"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newCapacity" type="int"/>
+ <doc>
+ <![CDATA[Change the capacity of the backing storage.
+ The data is preserved if newCapacity >= getCount().
+ @param newCapacity The new capacity in bytes.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the buffer to size 0.]]>
+ </doc>
+ </method>
+ <method name="truncate"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Change the capacity of the backing store to be the same as the current
+ count of the buffer.]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="length" type="int"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer.
+
+ @param bytes byte array to be appended
+ @param offset offset into byte array
+ @param length length of data]]>
+ </doc>
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Append specified bytes to the buffer
+
+ @param bytes byte array to be appended]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Define the sort order of the Buffer.
+
+ @param other The other buffer
+ @return Positive if this is bigger than other, 0 if they are equal, and
+ negative if this is smaller than other.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="charsetName" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ <doc>
+ <![CDATA[Convert the byte buffer to a string using a specific character encoding.
+
+ @param charsetName a valid Java character set name]]>
+ </doc>
+ </method>
+ <method name="clone" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="CloneNotSupportedException" type="java.lang.CloneNotSupportedException"/>
+ </method>
+ <doc>
+ <![CDATA[A byte sequence that is used as a Java native type for buffers.
+ It is resizable and distinguishes between the count of the sequence and
+ the current capacity.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Buffer -->
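+ <!-- Editor's note: a brief added illustration of Buffer's count-versus-capacity
+      behaviour described above:
+
+        Buffer buf = new Buffer();
+        buf.append("hello".getBytes());
+        buf.getCount();     // 5
+        buf.setCapacity(64);
+        buf.getCount();     // still 5; capacity grew, data preserved
+        buf.truncate();     // capacity shrinks back to the current count (5)
+ -->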
+ <!-- start class org.apache.hadoop.record.CsvRecordInput -->
+ <class name="CsvRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="CsvRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordInput -->
+ <!-- start class org.apache.hadoop.record.CsvRecordOutput -->
+ <class name="CsvRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="CsvRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of CsvRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.CsvRecordOutput -->
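+ <!-- Round-trip sketch for CsvRecordOutput/CsvRecordInput using only the
+      methods listed above. PersonRecord is a hypothetical generated Record
+      subclass (see the sketch after the Record class below); the CSV framing
+      itself is produced by the record's serialize/deserialize calls.
+
+      import java.io.ByteArrayInputStream;
+      import java.io.ByteArrayOutputStream;
+      import org.apache.hadoop.record.CsvRecordInput;
+      import org.apache.hadoop.record.CsvRecordOutput;
+
+      ByteArrayOutputStream bos = new ByteArrayOutputStream();
+      person.serialize(new CsvRecordOutput(bos), "person");
+
+      CsvRecordInput in =
+          new CsvRecordInput(new ByteArrayInputStream(bos.toByteArray()));
+      PersonRecord copy = new PersonRecord();
+      copy.deserialize(in, "person");  // reads fields back in write order
+ -->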
+ <!-- start interface org.apache.hadoop.record.Index -->
+ <interface name="Index" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="done" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="incr"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Interface that acts as an iterator for deserializing vectors and maps.
+ The deserializer returns an instance that the record uses to
+ read vectors and maps. An example of usage is as follows:
+
+ <code>
+ Index idx = startVector(...);
+ while (!idx.done()) {
+ .... // read element of a vector
+ idx.incr();
+ }
+ </code>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.Index -->
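+ <!-- A completed version of the loop fragment in the doc above, assuming a
+      RecordInput named rin and int-valued vector elements; the names are
+      illustrative.
+
+      Index idx = rin.startVector("values");
+      while (!idx.done()) {
+        int element = rin.readInt("e");  // read one vector element
+        idx.incr();
+      }
+      rin.endVector("values");
+ -->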
+ <!-- start class org.apache.hadoop.record.Record -->
+ <class name="Record" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="Record"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="serialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record with a tag (usually the field name)
+ @param rout Record output destination
+ @param tag record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record with a tag (usually field name)
+ @param rin Record input source
+ @param tag Record tag (Used only in tagged serialization e.g. XML)]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize a record without a tag
+ @param rout Record output destination]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize a record without a tag
+ @param rin Record input source]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="din" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Abstract class that is extended by generated classes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Record -->
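+ <!-- Hand-written sketch of a Record subclass, showing the serialize /
+      deserialize contract described above. In practice such classes are
+      generated by the record compiler (rcc); PersonRecord and its fields
+      are illustrative only.
+
+      import java.io.IOException;
+      import org.apache.hadoop.record.*;
+
+      public class PersonRecord extends Record {
+        private String name;
+        private int age;
+
+        public void serialize(RecordOutput rout, String tag) throws IOException {
+          rout.startRecord(this, tag);
+          rout.writeString(name, "name");
+          rout.writeInt(age, "age");
+          rout.endRecord(this, tag);
+        }
+
+        public void deserialize(RecordInput rin, String tag) throws IOException {
+          rin.startRecord(tag);
+          name = rin.readString("name");
+          age = rin.readInt("age");
+          rin.endRecord(tag);
+        }
+
+        public int compareTo(Object peer) throws ClassCastException {
+          PersonRecord p = (PersonRecord) peer;
+          int c = name.compareTo(p.name);
+          return (c != 0) ? c : (age < p.age ? -1 : (age == p.age ? 0 : 1));
+        }
+      }
+ -->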
+ <!-- start class org.apache.hadoop.record.RecordComparator -->
+ <class name="RecordComparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordComparator" type="java.lang.Class"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a raw {@link Record} comparison implementation.]]>
+ </doc>
+ </constructor>
+ <method name="compare" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <method name="define"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="comparator" type="org.apache.hadoop.record.RecordComparator"/>
+ <doc>
+ <![CDATA[Register an optimized comparator for a {@link Record} implementation.
+
+ @param c record class for which a raw comparator is provided
+ @param comparator Raw comparator instance for class c]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A raw record comparator base class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.RecordComparator -->
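+ <!-- Sketch of registering an optimized raw comparator via
+      RecordComparator.define, as described above. PersonRecord is the
+      illustrative record type from the Record sketch earlier; the byte-level
+      comparison simply delegates to Utils.compareBytes as a placeholder.
+
+      import org.apache.hadoop.record.RecordComparator;
+      import org.apache.hadoop.record.Utils;
+
+      public class PersonComparator extends RecordComparator {
+        public PersonComparator() { super(PersonRecord.class); }
+
+        public int compare(byte[] b1, int s1, int l1,
+                           byte[] b2, int s2, int l2) {
+          // placeholder: raw lexicographic comparison of serialized bytes
+          return Utils.compareBytes(b1, s1, l1, b2, s2, l2);
+        }
+      }
+
+      // register once, e.g. from a static initializer:
+      // RecordComparator.define(PersonRecord.class, new PersonComparator());
+ -->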
+ <!-- start interface org.apache.hadoop.record.RecordInput -->
+ <interface name="RecordInput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a byte from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a boolean from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read an integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a long integer from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a single-precision float from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a double-precision number from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a UTF-8 encoded string from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a byte array from serialized record.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return value read from serialized record.]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized record.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of elements.]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized vector.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for start of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)
+ @return Index that is used to count the number of map entries.]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check the mark for end of the serialized map.
+ @param tag Used by tagged serialization formats (such as XML)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the deserializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordInput -->
+ <!-- start interface org.apache.hadoop.record.RecordOutput -->
+ <interface name="RecordOutput" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a byte to serialized record.
+ @param b Byte to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a boolean to serialized record.
+ @param b Boolean to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write an integer to serialized record.
+ @param i Integer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a long integer to serialized record.
+ @param l Long to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a single-precision float to serialized record.
+ @param f Float to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a double precision floating point number to serialized record.
+ @param d Double to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a Unicode string to serialized record.
+ @param s String to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a buffer to serialized record.
+ @param buf Buffer to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a record to be serialized.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized record.
+ @param r Record to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a vector to be serialized.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized vector.
+ @param v Vector to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the start of a map to be serialized.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="m" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark the end of a serialized map.
+ @param m Map to be serialized
+ @param tag Used by tagged serialization formats (such as XML)
+ @throws IOException Indicates error in serialization]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface that all the serializers have to implement.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.RecordOutput -->
+ <!-- start class org.apache.hadoop.record.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a float from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <doc>
+ <![CDATA[Parse a double from a byte array.]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array containing the encoded long
+ @param start starting index
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="readVLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded long from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized long]]>
+ </doc>
+ </method>
+ <method name="readVInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads a zero-compressed encoded integer from a stream and returns it.
+ @param in input stream
+ @throws java.io.IOException
+ @return deserialized integer]]>
+ </doc>
+ </method>
+ <method name="getVIntSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="long"/>
+ <doc>
+ <![CDATA[Get the encoded length of an integer stored in the variable-length format
+ @return the encoded length]]>
+ </doc>
+ </method>
+ <method name="writeVLong"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 <= i <= 127, only one byte is used, holding the actual value.
+ For other values of i, the first byte indicates whether the
+ long is positive or negative, and how many bytes follow.
+ If the first byte value v is between -113 and -120, the long that follows
+ is positive and occupies -(v+112) bytes.
+ If the first byte value v is between -121 and -128, the long that follows
+ is negative and occupies -(v+120) bytes. Bytes are
+ stored with the highest non-zero byte first.
+
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
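+ <!-- Small demonstration of the zero-compressed encoding described above,
+      using only Utils methods listed in this class; the wrapper class and
+      sample values are illustrative.
+
+      import java.io.ByteArrayOutputStream;
+      import java.io.DataOutputStream;
+      import java.io.IOException;
+      import org.apache.hadoop.record.Utils;
+
+      public class VLongDemo {
+        public static void main(String[] args) throws IOException {
+          long[] samples = { 0L, 127L, -112L, 128L, -113L, 1L << 32 };
+          for (long v : samples) {
+            ByteArrayOutputStream bos = new ByteArrayOutputStream();
+            Utils.writeVLong(new DataOutputStream(bos), v);
+            // getVIntSize predicts the encoded length without serializing
+            System.out.println(v + " encodes to " + bos.size()
+                + " byte(s); getVIntSize gives " + Utils.getVIntSize(v));
+          }
+        }
+      }
+ -->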
+ <method name="writeVInt"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.DataOutput"/>
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serializes an int to a binary stream with zero-compressed encoding.
+
+ @param stream Binary output stream
+ @param i int to be serialized
+ @throws java.io.IOException]]>
+ </doc>
+ </method>
+ <method name="compareBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ <doc>
+ <![CDATA[Lexicographic order of binary data.]]>
+ </doc>
+ </method>
+ <field name="hexchars" type="char[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Various utility functions for the Hadoop record I/O runtime.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.Utils -->
+ <!-- start class org.apache.hadoop.record.XmlRecordInput -->
+ <class name="XmlRecordInput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordInput"/>
+ <constructor name="XmlRecordInput" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordInput]]>
+ </doc>
+ </constructor>
+ <method name="readByte" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBool" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readInt" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readLong" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFloat" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readDouble" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readBuffer" return="org.apache.hadoop.record.Buffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap" return="org.apache.hadoop.record.Index"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Deserializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordInput -->
+ <!-- start class org.apache.hadoop.record.XmlRecordOutput -->
+ <class name="XmlRecordOutput" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.RecordOutput"/>
+ <constructor name="XmlRecordOutput" type="java.io.OutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of XmlRecordOutput]]>
+ </doc>
+ </constructor>
+ <method name="writeByte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="boolean"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeInt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeLong"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="l" type="long"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeFloat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="f" type="float"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeDouble"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeString"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="org.apache.hadoop.record.Buffer"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="r" type="org.apache.hadoop.record.Record"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.ArrayList"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="startMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="endMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="v" type="java.util.TreeMap"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[XML Serializer.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.XmlRecordOutput -->
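+ <!-- Brief sketch of tagged XML serialization with the XmlRecordOutput and
+      XmlRecordInput classes above; PersonRecord is the illustrative record
+      type from earlier sketches, and person.xml is a hypothetical file.
+
+      import java.io.FileInputStream;
+      import org.apache.hadoop.record.XmlRecordInput;
+      import org.apache.hadoop.record.XmlRecordOutput;
+
+      person.serialize(new XmlRecordOutput(System.out), "person");
+
+      XmlRecordInput xin = new XmlRecordInput(new FileInputStream("person.xml"));
+      PersonRecord copy = new PersonRecord();
+      copy.deserialize(xin, "person");  // the tag is consumed by the XML format
+ -->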
+</package>
+<package name="org.apache.hadoop.record.compiler">
+ <!-- start class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <class name="CodeBuffer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A wrapper around StringBuffer that automatically handles indentation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.CodeBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.Consts -->
+ <class name="Consts" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="RIO_PREFIX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_VAR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RTI_FILTER_FIELDS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_OUTPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_INPUT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TAG" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Constant definitions for the Record I/O compiler.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.Consts -->
+ <!-- start class org.apache.hadoop.record.compiler.JBoolean -->
+ <class name="JBoolean" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBoolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBoolean]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBoolean -->
+ <!-- start class org.apache.hadoop.record.compiler.JBuffer -->
+ <class name="JBuffer" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JBuffer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JBuffer]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "buffer" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JBuffer -->
+ <!-- start class org.apache.hadoop.record.compiler.JByte -->
+ <class name="JByte" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JByte"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "byte" type.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JByte -->
+ <!-- start class org.apache.hadoop.record.compiler.JDouble -->
+ <class name="JDouble" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JDouble"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JDouble]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JDouble -->
+ <!-- start class org.apache.hadoop.record.compiler.JField -->
+ <class name="JField" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JField" type="java.lang.String, java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JField]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[A thin wrapper around a record field.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JField -->
+ <!-- start class org.apache.hadoop.record.compiler.JFile -->
+ <class name="JFile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFile" type="java.lang.String, java.util.ArrayList, java.util.ArrayList"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFile
+
+ @param name possibly full pathname to the file
+ @param inclFiles included files (as JFile)
+ @param recList List of records defined within this file]]>
+ </doc>
+ </constructor>
+ <method name="genCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <param name="destDir" type="java.lang.String"/>
+ <param name="options" type="java.util.ArrayList"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate record code in the given language. The language name
+ should be all lowercase.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Container for the Hadoop Record DDL.
+ The main components of the file are filename, list of included files,
+ and records defined in that file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFile -->
+ <!-- start class org.apache.hadoop.record.compiler.JFloat -->
+ <class name="JFloat" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JFloat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JFloat]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JFloat -->
+ <!-- start class org.apache.hadoop.record.compiler.JInt -->
+ <class name="JInt" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JInt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JInt]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "int" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JInt -->
+ <!-- start class org.apache.hadoop.record.compiler.JLong -->
+ <class name="JLong" extends="org.apache.hadoop.record.compiler.JType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JLong"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JLong]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[Code generator for "long" type]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JLong -->
+ <!-- start class org.apache.hadoop.record.compiler.JMap -->
+ <class name="JMap" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JMap" type="org.apache.hadoop.record.compiler.JType, org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JMap]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JMap -->
+ <!-- start class org.apache.hadoop.record.compiler.JRecord -->
+ <class name="JRecord" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JRecord" type="java.lang.String, java.util.ArrayList"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JRecord]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JRecord -->
+ <!-- start class org.apache.hadoop.record.compiler.JString -->
+ <class name="JString" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JString"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JString]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JString -->
+ <!-- start class org.apache.hadoop.record.compiler.JType -->
+ <class name="JType" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Abstract base class for all types supported by Hadoop Record I/O.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JType -->
+ <!-- start class org.apache.hadoop.record.compiler.JVector -->
+ <class name="JVector" extends="org.apache.hadoop.record.compiler.JCompType"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JVector" type="org.apache.hadoop.record.compiler.JType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of JVector]]>
+ </doc>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.JVector -->
+</package>
+<package name="org.apache.hadoop.record.compiler.ant">
+ <!-- start class org.apache.hadoop.record.compiler.ant.RccTask -->
+ <class name="RccTask" extends="org.apache.tools.ant.Task"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RccTask"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new instance of RccTask]]>
+ </doc>
+ </constructor>
+ <method name="setLanguage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="language" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the output language option
+ @param language "java"/"c++"]]>
+ </doc>
+ </method>
+ <method name="setFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="file" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets the record definition file attribute
+ @param file record definition file]]>
+ </doc>
+ </method>
+ <method name="setFailonerror"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="flag" type="boolean"/>
+ <doc>
+ <![CDATA[Given multiple files (via fileset), sets the error handling behavior.
+ @param flag if true, a build exception is thrown on failure (the default)]]>
+ </doc>
+ </method>
+ <method name="setDestdir"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[Sets directory where output files will be generated
+ @param dir output directory]]>
+ </doc>
+ </method>
+ <method name="addFileset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="set" type="org.apache.tools.ant.types.FileSet"/>
+ <doc>
+ <![CDATA[Adds a fileset that can consist of one or more files
+ @param set Set of record definition files]]>
+ </doc>
+ </method>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="BuildException" type="org.apache.tools.ant.BuildException"/>
+ <doc>
+ <![CDATA[Invoke the Hadoop record compiler on each record definition file]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Hadoop record compiler Ant task
+<p> This task takes the given record definition files and compiles them into
+ Java or C++ files. It is then up to the user to compile the generated files.
+
+ <p> The task requires the <code>file</code> or the nested fileset element to be
+ specified. Optional attributes are <code>language</code> (sets the output
+ language, default is "java"),
+ <code>destdir</code> (name of the destination directory for generated Java/C++
+ code, default is ".") and <code>failonerror</code> (specifies the error handling
+ behavior; default is true).
+ <p><h4>Usage</h4>
+ <pre>
+ &lt;recordcc
+ destdir="${basedir}/gensrc"
+ language="java"&gt;
+ &lt;fileset include="**\/*.jr" /&gt;
+ &lt;/recordcc&gt;
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.ant.RccTask -->
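+ <!-- A minimal usage sketch (illustrative, not part of the generated API
+      listing): driving RccTask from Java code with the setters documented
+      above. setProject() comes from the Ant Task base class; the bare
+      Project instance and the file names are assumptions for the sketch.
+
+        import java.io.File;
+        import org.apache.hadoop.record.compiler.ant.RccTask;
+        import org.apache.tools.ant.Project;
+
+        RccTask task = new RccTask();
+        task.setProject(new Project());        // minimal Ant context (assumed sufficient)
+        task.setLanguage("java");              // or "c++"
+        task.setFile(new File("employee.jr")); // hypothetical record definition file
+        task.setDestdir(new File("gensrc"));
+        task.setFailonerror(true);
+        task.execute();                        // may throw BuildException
+ -->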
+</package>
+<package name="org.apache.hadoop.record.compiler.generated">
+ <!-- start class org.apache.hadoop.record.compiler.generated.ParseException -->
+ <class name="ParseException" extends="java.lang.Exception"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ParseException" type="org.apache.hadoop.record.compiler.generated.Token, int[][], java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This constructor is used by the method "generateParseException"
+ in the generated parser. Calling this constructor generates
+ a new object of this type with the fields "currentToken",
+ "expectedTokenSequences", and "tokenImage" set. The boolean
+ flag "specialConstructor" is also set to true to indicate that
+ this constructor was used to create this object.
+ This constructor calls its super class with the empty string
+ to force the "toString" method of parent class "Throwable" to
+ print the error message in the form:
+ ParseException: <result of getMessage>]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The following constructors are for use by you for whatever
+ purpose you can think of. Constructing the exception in this
+ manner makes the exception behave in the normal way - i.e., as
+ documented in the class "Throwable". The fields "errorToken",
+ "expectedTokenSequences", and "tokenImage" do not contain
+ relevant information. The JavaCC generated code does not use
+ these constructors.]]>
+ </doc>
+ </constructor>
+ <constructor name="ParseException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This method has the standard behavior when this object has been
+ created using the standard constructors. Otherwise, it uses
+ "currentToken" and "expectedTokenSequences" to generate a parse
+ error message and returns it. If this object has been created
+ due to a parse error, and you do not catch it (it gets thrown
+ from the parser), then this method is called during the printing
+ of the final stack trace, and hence the correct error message
+ gets displayed.]]>
+ </doc>
+ </method>
+ <method name="add_escapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Used to convert raw characters to their escaped version
+ when these raw versions cannot be used as part of an ASCII
+ string literal.]]>
+ </doc>
+ </method>
+ <field name="specialConstructor" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This variable determines which constructor was used to create
+ this object and thereby affects the semantics of the
+ "getMessage" method (see below).]]>
+ </doc>
+ </field>
+ <field name="currentToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is the last token that has been consumed successfully. If
+ this object has been created due to a parse error, the token
+ following this token will (therefore) be the first error token.]]>
+ </doc>
+ </field>
+ <field name="expectedTokenSequences" type="int[][]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Each entry in this array is an array of integers. Each array
+ of integers represents a sequence of tokens (by their ordinal
+ values) that is expected at this point of the parse.]]>
+ </doc>
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This is a reference to the "tokenImage" array of the generated
+ parser within which the parse error occurred. This array is
+ defined in the generated ...Constants interface.]]>
+ </doc>
+ </field>
+ <field name="eol" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The end of line string for this machine.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This exception is thrown when parse errors are encountered.
+ You can explicitly create objects of this exception type by
+ calling the method generateParseException in the generated
+ parser.
+
+ You can modify this class to customize your error reporting
+ mechanisms so long as you retain the public fields.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.ParseException -->
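+ <!-- A small sketch (illustrative; the file path is an assumption) of
+      handling ParseException from the Rcc parser described below.
+      getMessage() builds the error text from currentToken and
+      expectedTokenSequences when the exception comes from the parser.
+
+        static void check(String path) throws java.io.IOException {
+          try {
+            new Rcc(new java.io.FileInputStream(path)).Input();
+          } catch (ParseException e) {
+            // e.currentToken is the last token consumed successfully;
+            // the token after it is the first error token.
+            System.err.println(e.getMessage());
+          }
+        }
+ -->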
+ <!-- start class org.apache.hadoop.record.compiler.generated.Rcc -->
+ <class name="Rcc" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="Rcc" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Rcc" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="usage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="driver" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <method name="Input" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Include" return="org.apache.hadoop.record.compiler.JFile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Module" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ModuleName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="RecordList" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Record" return="org.apache.hadoop.record.compiler.JRecord"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Field" return="org.apache.hadoop.record.compiler.JField"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Type" return="org.apache.hadoop.record.compiler.JType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Map" return="org.apache.hadoop.record.compiler.JMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="Vector" return="org.apache.hadoop.record.compiler.JVector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tm" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"/>
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="index" type="int"/>
+ </method>
+ <method name="generateParseException" return="org.apache.hadoop.record.compiler.generated.ParseException"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="enable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="disable_tracing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="token_source" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="token" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jj_nt" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Rcc -->
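+ <!-- A sketch of reusing one parser across inputs via ReInit (illustrative;
+      firstDdl and secondDdl are DDL strings assumed to be in scope):
+
+        import java.io.StringReader;
+        import org.apache.hadoop.record.compiler.JFile;
+
+        Rcc parser = new Rcc(new StringReader(firstDdl));
+        JFile a = parser.Input();                   // parse the first definition
+        parser.ReInit(new StringReader(secondDdl)); // reset, reuse the same parser
+        JFile b = parser.Input();
+ -->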
+ <!-- start interface org.apache.hadoop.record.compiler.generated.RccConstants -->
+ <interface name="RccConstants" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="EOF" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MODULE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RECORD_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INCLUDE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BOOLEAN_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RBRACE_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SEMICOLON_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CSTRING_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IDENT_TKN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinOneLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WithinMultiLineComment" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="tokenImage" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </interface>
+ <!-- end interface org.apache.hadoop.record.compiler.generated.RccConstants -->
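+ <!-- The token-kind constants above index into tokenImage; a brief sketch
+      (illustrative) of printing a readable name for a token:
+
+        Token t = parser.getNextToken();   // parser: an Rcc instance, assumed in scope
+        if (t.kind != RccConstants.EOF) {
+          System.out.println(RccConstants.tokenImage[t.kind] + " : " + t.image);
+        }
+ -->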
+ <!-- start class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
+ <class name="RccTokenManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setDebugStream"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ds" type="java.io.PrintStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/>
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="SwitchTo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="lexState" type="int"/>
+ </method>
+ <method name="jjFillToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="debugStream" type="java.io.PrintStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjstrLiteralImages" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="lexStateNames" type="java.lang.String[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="jjnewLexState" type="int[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="input_stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="curChar" type="char"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.RccTokenManager -->
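+ <!-- Stand-alone lexing sketch (illustrative; src is an assumed DDL string):
+      wrap a Reader in a SimpleCharStream, hand it to the token manager, and
+      pull tokens until EOF.
+
+        SimpleCharStream in = new SimpleCharStream(new java.io.StringReader(src));
+        RccTokenManager tm = new RccTokenManager(in);
+        for (Token t = tm.getNextToken(); t.kind != RccConstants.EOF;
+             t = tm.getNextToken()) {
+          System.out.println(t.beginLine + ":" + t.beginColumn + " " + t.image);
+        }
+ -->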
+ <!-- start class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
+ <class name="SimpleCharStream" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.Reader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </constructor>
+ <constructor name="SimpleCharStream" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setTabSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="getTabSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ </method>
+ <method name="ExpandBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="wrapAround" type="boolean"/>
+ </method>
+ <method name="FillBuff"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="BeginToken" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="UpdateLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="c" type="char"/>
+ </method>
+ <method name="readChar" return="char"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getEndColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getEndLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginColumn" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBeginLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="backup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="amount" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.Reader"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <param name="buffersize" type="int"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="encoding" type="java.lang.String"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/>
+ </method>
+ <method name="ReInit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dstream" type="java.io.InputStream"/>
+ <param name="startline" type="int"/>
+ <param name="startcolumn" type="int"/>
+ </method>
+ <method name="GetImage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="GetSuffix" return="char[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="int"/>
+ </method>
+ <method name="Done"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="adjustBeginLineColumn"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newLine" type="int"/>
+ <param name="newCol" type="int"/>
+ <doc>
+ <![CDATA[Method to adjust line and column numbers for the start of a token.]]>
+ </doc>
+ </method>
+ <field name="staticFlag" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufpos" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufline" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="bufcolumn" type="int[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="column" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="line" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsCR" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="prevCharIsLF" type="boolean"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputStream" type="java.io.Reader"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="buffer" type="char[]"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="maxNextCharInd" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="inBuf" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="tabSize" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of interface CharStream, where the stream is assumed to
+ contain only ASCII characters (without unicode processing).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.SimpleCharStream -->
+ <!-- start class org.apache.hadoop.record.compiler.generated.Token -->
+ <class name="Token" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Token"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the image.]]>
+ </doc>
+ </method>
+ <method name="newToken" return="org.apache.hadoop.record.compiler.generated.Token"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="ofKind" type="int"/>
+ <doc>
+ <![CDATA[Returns a new Token object, by default. However, if you want, you
+ can create and return subclass objects based on the value of ofKind.
+ Simply add the cases to the switch for all those special cases.
+ For example, if you have a subclass of Token called IDToken that
+ you want to create if ofKind is ID, simply add something like:
+
+ case MyParserConstants.ID : return new IDToken();
+
+ to the following switch statement. Then you can cast matchedToken
+ variable to the appropriate type and use it in your lexical actions.]]>
+ </doc>
+ </method>
+ <field name="kind" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[An integer that describes the kind of this token. This numbering
+ system is determined by JavaCCParser, and a table of these numbers is
+ stored in the file ...Constants.java.]]>
+ </doc>
+ </field>
+ <field name="beginLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="beginColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endLine" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="endColumn" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[beginLine and beginColumn describe the position of the first character
+ of this token; endLine and endColumn describe the position of the
+ last character of this token.]]>
+ </doc>
+ </field>
+ <field name="image" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The string image of the token.]]>
+ </doc>
+ </field>
+ <field name="next" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A reference to the next regular (non-special) token from the input
+ stream. If this is the last token from the input stream, or if the
+ token manager has not read tokens beyond this one, this field is
+ set to null. This is true only if this token is also a regular
+ token. Otherwise, see below for a description of the contents of
+ this field.]]>
+ </doc>
+ </field>
+ <field name="specialToken" type="org.apache.hadoop.record.compiler.generated.Token"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[This field is used to access special tokens that occur prior to this
+ token, but after the immediately preceding regular (non-special) token.
+ If there are no such special tokens, this field is set to null.
+ When there is more than one such special token, this field refers
+ to the last of these special tokens, which in turn refers to the next
+ previous special token through its specialToken field, and so on
+ until the first special token (whose specialToken field is null).
+ The next fields of special tokens refer to other special tokens that
+ immediately follow them (without an intervening regular token). If there
+ is no such token, this field is null.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Describes the input token stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.Token -->
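+ <!-- A sketch of the customization the newToken() docs describe: returning a
+      hypothetical Token subclass for identifier tokens (IDToken is an
+      assumption, not part of this API).
+
+        class IDToken extends Token {
+          // extra state for lexical actions could live here
+        }
+
+        // the modified factory, per the newToken() docs above:
+        public static final Token newToken(int ofKind) {
+          switch (ofKind) {
+            case RccConstants.IDENT_TKN: return new IDToken();
+            default: return new Token();
+          }
+        }
+ -->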
+ <!-- start class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
+ <class name="TokenMgrError" extends="java.lang.Error"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TokenMgrError"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TokenMgrError" type="boolean, int, int, int, java.lang.String, char, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addEscapes" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Replaces unprintable characters by their escaped (or unicode escaped)
+ equivalents in the given string]]>
+ </doc>
+ </method>
+ <method name="LexicalError" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="EOFSeen" type="boolean"/>
+ <param name="lexState" type="int"/>
+ <param name="errorLine" type="int"/>
+ <param name="errorColumn" type="int"/>
+ <param name="errorAfter" type="java.lang.String"/>
+ <param name="curChar" type="char"/>
+ <doc>
+ <![CDATA[Returns a detailed message for the Error when it is thrown by the
+ token manager to indicate a lexical error.
+ Parameters :
+ EOFSeen : indicates if EOF caused the lexical error
+ lexState : lexical state in which this error occurred
+ errorLine : line number where the error occurred
+ errorColumn : column number where the error occurred
+ errorAfter : prefix that was seen before this error occurred
+ curChar : the offending character
+ Note: You can customize the lexical error message by modifying this method.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[You can also modify the body of this method to customize your error messages.
+ For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not
+ of end-users' concern, so you can return something like:
+
+ "Internal Error : Please file a bug report .... "
+
+ from this method for such cases in the release version of your parser.]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.record.compiler.generated.TokenMgrError -->
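+ <!-- Lexical errors surface as TokenMgrError (an Error, not an Exception),
+      so a robust driver catches it separately from ParseException; a brief
+      sketch (illustrative; src is an assumed DDL string):
+
+        try {
+          new Rcc(new java.io.StringReader(src)).Input();
+        } catch (ParseException e) {
+          System.err.println("syntax error: " + e.getMessage());
+        } catch (TokenMgrError e) {
+          // message built by LexicalError(...) above
+          System.err.println("lexical error: " + e.getMessage());
+        }
+ -->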
+</package>
+<package name="org.apache.hadoop.record.meta">
+ <!-- start class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <class name="FieldTypeInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's TypeID object]]>
+ </doc>
+ </method>
+ <method name="getFieldID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the field's id (name)]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two FieldTypeInfos are equal if each of their fields matches]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ti" type="org.apache.hadoop.record.meta.FieldTypeInfo"/>
+ </method>
+ <doc>
+ <![CDATA[Represents the type information for a field, which is made up of its
+ ID (name) and its type (a TypeID object).]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.FieldTypeInfo -->
+ <!-- start class org.apache.hadoop.record.meta.MapTypeID -->
+ <class name="MapTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapTypeID" type="org.apache.hadoop.record.meta.TypeID, org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getKeyTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's key element]]>
+ </doc>
+ </method>
+ <method name="getValueTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the TypeID of the map's value element]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two map typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a Map]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.MapTypeID -->
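+ <!-- Composite TypeIDs nest; a short sketch (illustrative) of describing a
+      map<string, long> field type with the constructor above:
+
+        MapTypeID mapTid = new MapTypeID(TypeID.StringTypeID, TypeID.LongTypeID);
+        TypeID keyTid = mapTid.getKeyTypeID();   // TypeID.StringTypeID
+ -->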
+ <!-- start class org.apache.hadoop.record.meta.RecordTypeInfo -->
+ <class name="RecordTypeInfo" extends="org.apache.hadoop.record.Record"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty RecordTypeInfo object.]]>
+ </doc>
+ </constructor>
+ <constructor name="RecordTypeInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a RecordTypeInfo object representing a record with the given name
+ @param name Name of the record]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return the name of the record]]>
+ </doc>
+ </method>
+ <method name="setName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[set the name of the record]]>
+ </doc>
+ </method>
+ <method name="addField"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fieldName" type="java.lang.String"/>
+ <param name="tid" type="org.apache.hadoop.record.meta.TypeID"/>
+ <doc>
+ <![CDATA[Add a field.
+ @param fieldName Name of the field
+ @param tid Type ID of the field]]>
+ </doc>
+ </method>
+ <method name="getFieldTypeInfos" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a collection of field type infos]]>
+ </doc>
+ </method>
+ <method name="getNestedStructTypeInfo" return="org.apache.hadoop.record.meta.RecordTypeInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the type info of a nested record. We only consider nesting
+ to one level.
+ @param name Name of the nested record]]>
+ </doc>
+ </method>
+ <method name="serialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rout" type="org.apache.hadoop.record.RecordOutput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="deserialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize the type information for a record]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="peer_" type="java.lang.Object"/>
+ <exception name="ClassCastException" type="java.lang.ClassCastException"/>
+ <doc>
+ <![CDATA[This class doesn't implement Comparable as it's not meant to be used
+ for anything besides de/serializing, so comparison is not really
+ implemented: a ClassCastException is thrown if the argument is not a
+ RecordTypeInfo, and 0 is returned if another RecordTypeInfo is passed in.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A record's Type Information object which can read/write itself.
+
+ Type information for a record comprises metadata about the record,
+ as well as a collection of type information for each field in the record.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.RecordTypeInfo -->
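+ <!-- A sketch of building and serializing type information (illustrative;
+      the record and field names are assumptions, and the RecordOutput is
+      assumed to come from one of the record I/O implementations elsewhere in
+      org.apache.hadoop.record):
+
+        static void writeTypeInfo(org.apache.hadoop.record.RecordOutput out)
+            throws java.io.IOException {
+          RecordTypeInfo rti = new RecordTypeInfo("Employee");
+          rti.addField("name", TypeID.StringTypeID);
+          rti.addField("id", TypeID.IntTypeID);
+          rti.serialize(out, "Employee");   // writes the metadata, not the data
+        }
+ -->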
+ <!-- start class org.apache.hadoop.record.meta.StructTypeID -->
+ <class name="StructTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StructTypeID" type="org.apache.hadoop.record.meta.RecordTypeInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a StructTypeID based on the RecordTypeInfo of some record]]>
+ </doc>
+ </constructor>
+ <method name="getFieldTypeInfos" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for a struct]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.StructTypeID -->
+ <!-- start class org.apache.hadoop.record.meta.TypeID -->
+ <class name="TypeID" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getTypeVal" return="byte"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the type value. One of the constants in RIOType.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two base typeIDs are equal if they refer to the same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <field name="BoolTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constant instances for the basic types, so we can share them.]]>
+ </doc>
+ </field>
+ <field name="BufferTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ByteTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DoubleTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FloatTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IntTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LongTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="StringTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="typeVal" type="byte"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Represents typeID for basic types.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID -->
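+ <!-- The basic-type singletons above are shared instances whose type value
+      matches the RIOType constants below; e.g. (illustrative):
+
+        boolean isInt = TypeID.IntTypeID.getTypeVal() == TypeID.RIOType.INT;
+ -->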
+ <!-- start class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <class name="TypeID.RIOType" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TypeID.RIOType"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <field name="BOOL" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BUFFER" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="BYTE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FLOAT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="INT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRUCT" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VECTOR" type="byte"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[constants representing the IDL types we support]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.TypeID.RIOType -->
+ <!-- start class org.apache.hadoop.record.meta.Utils -->
+ <class name="Utils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rin" type="org.apache.hadoop.record.RecordInput"/>
+ <param name="tag" type="java.lang.String"/>
+ <param name="typeID" type="org.apache.hadoop.record.meta.TypeID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read/skip bytes from a stream based on a type.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Various utility functions for the Hadoop record I/O platform.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.Utils -->
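+ <!-- Usage sketch for Utils.skip (illustrative; the method and TypeID.LongTypeID come
+      from the listing above, while the "timestamp" tag and the surrounding class are
+      hypothetical):
+
+      import java.io.IOException;
+      import org.apache.hadoop.record.RecordInput;
+      import org.apache.hadoop.record.meta.TypeID;
+      import org.apache.hadoop.record.meta.Utils;
+
+      class SkipFieldExample {
+        // Skip a long-typed field tagged "timestamp" without deserializing it.
+        static void skipTimestamp(RecordInput rin) throws IOException {
+          Utils.skip(rin, "timestamp", TypeID.LongTypeID);
+        }
+      }
+ -->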
+ <!-- start class org.apache.hadoop.record.meta.VectorTypeID -->
+ <class name="VectorTypeID" extends="org.apache.hadoop.record.meta.TypeID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VectorTypeID" type="org.apache.hadoop.record.meta.TypeID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getElementTypeID" return="org.apache.hadoop.record.meta.TypeID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Two vector typeIDs are equal if their constituent elements have the
+ same type]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[We use a basic hashcode implementation, since this class will likely not
+ be used as a hashmap key]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Represents typeID for vectors.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.record.meta.VectorTypeID -->
+</package>
+<package name="org.apache.hadoop.security">
+ <!-- start class org.apache.hadoop.security.AccessControlException -->
+ <class name="AccessControlException" extends="org.apache.hadoop.fs.permission.AccessControlException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AccessControlException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor is needed for unwrapping from
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+ </doc>
+ </constructor>
+ <constructor name="AccessControlException" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new exception with the specified cause and a detail
+ message of <tt>(cause==null ? null : cause.toString())</tt> (which
+ typically contains the class and detail message of <tt>cause</tt>).
+ @param cause the cause (which is saved for later retrieval by the
+ {@link #getCause()} method). (A <tt>null</tt> value is
+ permitted, and indicates that the cause is nonexistent or
+ unknown.)]]>
+ </doc>
+ </constructor>
+ <doc>
+ <![CDATA[An exception class for access control related issues.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.AccessControlException -->
+ <!-- start class org.apache.hadoop.security.Group -->
+ <class name="Group" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.security.Principal"/>
+ <constructor name="Group" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new <code>Group</code> with the given groupname.
+ @param group group name]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <doc>
+ <![CDATA[A group to which a user belongs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.Group -->
+ <!-- start class org.apache.hadoop.security.SecurityUtil -->
+ <class name="SecurityUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SecurityUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPolicy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="policy" type="java.security.Policy"/>
+ <doc>
+ <![CDATA[Set the global security policy for Hadoop.
+
+ @param policy {@link Policy} used for authorization.]]>
+ </doc>
+ </method>
+ <method name="getPolicy" return="java.security.Policy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current global security policy for Hadoop.
+ @return the current {@link Policy}]]>
+ </doc>
+ </method>
+ <method name="getSubject" return="javax.security.auth.Subject"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <doc>
+ <![CDATA[Get the {@link Subject} for the user identified by <code>ugi</code>.
+ @param ugi user
+ @return the {@link Subject} for the user identified by <code>ugi</code>]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.security.SecurityUtil -->
+ <!-- start class org.apache.hadoop.security.SecurityUtil.AccessControlList -->
+ <class name="SecurityUtil.AccessControlList" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SecurityUtil.AccessControlList" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a new ACL from a String representation of the same.
+
+ The String is a comma-separated list of users and groups.
+ The user list comes first and is separated from the group list
+ by a space, e.g. "user1,user2 group1,group2".
+
+ @param aclString String representation of the ACL]]>
+ </doc>
+ </constructor>
+ <method name="allAllowed" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUsers" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getGroups" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="WILDCARD_ACL_VALUE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Class representing a configured access control list.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.SecurityUtil.AccessControlList -->
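+ <!-- Usage sketch for SecurityUtil.AccessControlList (illustrative; the ACL string
+      follows the "user1,user2 group1,group2" format documented above, and the
+      assumption that WILDCARD_ACL_VALUE yields allAllowed() is the editor's reading):
+
+      import java.util.Set;
+      import org.apache.hadoop.security.SecurityUtil.AccessControlList;
+
+      class AclExample {
+        static void inspect() {
+          AccessControlList acl = new AccessControlList("user1,user2 group1,group2");
+          Set users = acl.getUsers();    // raw Set, as in the listing above
+          Set groups = acl.getGroups();
+          // An ACL built from the wildcard value is assumed to allow everyone.
+          AccessControlList open = new AccessControlList(AccessControlList.WILDCARD_ACL_VALUE);
+          boolean all = open.allAllowed();
+        }
+      }
+ -->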
+ <!-- start class org.apache.hadoop.security.UnixUserGroupInformation -->
+ <class name="UnixUserGroupInformation" extends="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UnixUserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor taking the user's name and the user's group names.
+ The first entry in the groups list is the default group.
+
+ @param userName a user's name
+ @param groupNames groups list, first of which is the default group
+ @exception IllegalArgumentException if any argument is null]]>
+ </doc>
+ </constructor>
+ <constructor name="UnixUserGroupInformation" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor taking an array of user/group names.
+
+ @param ugi an array containing user/group names, the first
+ element of which is the user name, the second of
+ which is the default group name.
+ @exception IllegalArgumentException if the array size is less than 2
+ or any element is null.]]>
+ </doc>
+ </constructor>
+ <method name="createImmutable" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Create an immutable {@link UnixUserGroupInformation} object.]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an array of group names]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the user's name]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Deserialize this object.
+ First check if this is a UGI in the string format.
+ If not, throw an IOException; otherwise
+ set this object's fields by reading them from the given data input.
+
+ @param in input stream
+ @exception IOException thrown if any error is encountered while reading]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Serialize this object.
+ First write a string marking that this is a UGI in the string format,
+ then write this object's serialized form to the given data output.
+
+ @param out output stream
+ @exception IOException thrown if any error is encountered while writing]]>
+ </doc>
+ </method>
+ <method name="saveToConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <param name="ugi" type="org.apache.hadoop.security.UnixUserGroupInformation"/>
+ <doc>
+ <![CDATA[Store the given <code>ugi</code> as a comma-separated string in
+ <code>conf</code> under the property <code>attr</code>.
+
+ The String starts with the user name, followed by the default group name
+ and the other group names.
+
+ @param conf configuration
+ @param attr property name
+ @param ugi a UnixUserGroupInformation]]>
+ </doc>
+ </method>
+ <method name="readFromConf" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attr" type="java.lang.String"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Read a UGI from the given <code>conf</code>.
+
+ The object is expected to be stored under the property name <code>attr</code>
+ as a comma-separated string that starts
+ with the user name followed by group names.
+ If the property is not defined, return null.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the ugi in the map.
+ Otherwise, construct a UGI from the configuration, store it in the
+ ugi map and return it.
+
+ @param conf configuration
+ @param attr property name
+ @return a UnixUGI
+ @throws LoginException if the stored string is ill-formatted.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Get the current user's name and the names of all of the user's groups from Unix.
+ It's assumed that there is only one UGI per user. If this user already
+ has a UGI in the ugi map, return the ugi in the map.
+ Otherwise get the current user's information from Unix, store it
+ in the map, and return it.
+
+ If the current user's UNIX username or groups are configured in such a way
+ as to throw an Exception, for example if the user uses LDAP, then this method
+ will use the {@link #DEFAULT_USERNAME} and {@link #DEFAULT_GROUP}
+ constants.]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Equivalent to login(conf, false).]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="save" type="boolean"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Get a user's name & its group names from the given configuration;
+ if they are not defined in the configuration, get the current user's
+ information from Unix.
+ If the user already has a UGI in the ugi map, return the one in
+ the UGI map.
+
+ @param conf either a job configuration or client's configuration
+ @param save whether to save the UGI to conf
+ @return a UnixUserGroupInformation holding the user/group information
+ @exception LoginException if not able to get the user/group information]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Decide if two UGIs are the same
+
+ @param other other object
+ @return true if they are the same; false otherwise.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a hash code for this UGI.
+ The hash code for a UGI is the hash code of its user name string.
+
+ @return a hash code value for this UGI.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert this object to a string
+
+ @return a comma separated string containing the user name and group names]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="DEFAULT_USERNAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEFAULT_GROUP" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="UGI_PROPERTY_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[An implementation of UserGroupInformation for the Unix system.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UnixUserGroupInformation -->
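+ <!-- Usage sketch for UnixUserGroupInformation (illustrative; it assumes that
+      login(conf, true) stores the UGI under UGI_PROPERTY_NAME, per the docs above):
+
+      import javax.security.auth.login.LoginException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.security.UnixUserGroupInformation;
+
+      class UgiExample {
+        static void roundTrip() throws LoginException {
+          Configuration conf = new Configuration();
+          // Log in from Unix (or reuse the cached UGI) and save it into conf.
+          UnixUserGroupInformation ugi = UnixUserGroupInformation.login(conf, true);
+          // Recover it later from the configuration.
+          UnixUserGroupInformation same = UnixUserGroupInformation.readFromConf(
+              conf, UnixUserGroupInformation.UGI_PROPERTY_NAME);
+        }
+      }
+ -->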
+ <!-- start class org.apache.hadoop.security.User -->
+ <class name="User" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.security.Principal"/>
+ <constructor name="User" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new <code>User</code> with the given username.
+ @param user user name]]>
+ </doc>
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <doc>
+ <![CDATA[The username of a user.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.User -->
+ <!-- start class org.apache.hadoop.security.UserGroupInformation -->
+ <class name="UserGroupInformation" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.security.Principal"/>
+ <constructor name="UserGroupInformation"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCurrentUGI" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="setCurrentUGI"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link #setCurrentUser(UserGroupInformation)}">
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <doc>
+ <![CDATA[Set the {@link UserGroupInformation} for the current thread
+ @deprecated Use {@link #setCurrentUser(UserGroupInformation)}]]>
+ </doc>
+ </method>
+ <method name="setCurrentUser"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+ <doc>
+ <![CDATA[Set the {@link UserGroupInformation} for the current thread
+ WARNING - This method should be used only in test cases and other exceptional
+ cases!
+ @param ugi {@link UserGroupInformation} for the current thread]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user name.
+
+ @return the user's name]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the names of the groups that the user belongs to.
+
+ @return an array of group names]]>
+ </doc>
+ </method>
+ <method name="login" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="LoginException" type="javax.security.auth.login.LoginException"/>
+ <doc>
+ <![CDATA[Login and return a UserGroupInformation object.]]>
+ </doc>
+ </method>
+ <method name="readFrom" return="org.apache.hadoop.security.UserGroupInformation"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a {@link UserGroupInformation} from conf]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Writable} abstract class for storing user and groups information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.UserGroupInformation -->
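+ <!-- Usage sketch for UserGroupInformation (illustrative, using only the methods
+      listed above; setCurrentUser appears because the docs restrict it to tests):
+
+      import javax.security.auth.login.LoginException;
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.security.UserGroupInformation;
+
+      class CurrentUserExample {
+        static void show() throws LoginException {
+          UserGroupInformation ugi = UserGroupInformation.login(new Configuration());
+          UserGroupInformation.setCurrentUser(ugi);  // test-only, per the warning above
+          UserGroupInformation current = UserGroupInformation.getCurrentUGI();
+          String name = current.getUserName();
+          String[] groups = current.getGroupNames();
+        }
+      }
+ -->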
+</package>
+<package name="org.apache.hadoop.security.authorize">
+ <!-- start class org.apache.hadoop.security.authorize.AuthorizationException -->
+ <class name="AuthorizationException" extends="org.apache.hadoop.security.AccessControlException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="AuthorizationException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="AuthorizationException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="AuthorizationException" type="java.lang.Throwable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a new exception with the specified cause and a detail
+ message of <tt>(cause==null ? null : cause.toString())</tt> (which
+ typically contains the class and detail message of <tt>cause</tt>).
+ @param cause the cause (which is saved for later retrieval by the
+ {@link #getCause()} method). (A <tt>null</tt> value is
+ permitted, and indicates that the cause is nonexistent or
+ unknown.)]]>
+ </doc>
+ </constructor>
+ <method name="getStackTrace" return="java.lang.StackTraceElement[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="printStackTrace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="printStackTrace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.io.PrintStream"/>
+ </method>
+ <method name="printStackTrace"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.io.PrintWriter"/>
+ </method>
+ <doc>
+ <![CDATA[An exception class for authorization-related issues.
+
+ This class <em>does not</em> provide the stack trace for security purposes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.authorize.AuthorizationException -->
+ <!-- start class org.apache.hadoop.security.authorize.ConfiguredPolicy -->
+ <class name="ConfiguredPolicy" extends="java.security.Policy"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="ConfiguredPolicy" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.security.authorize.PolicyProvider"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="implies" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="domain" type="java.security.ProtectionDomain"/>
+ <param name="permission" type="java.security.Permission"/>
+ </method>
+ <method name="getPermissions" return="java.security.PermissionCollection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="domain" type="java.security.ProtectionDomain"/>
+ </method>
+ <method name="refresh"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="HADOOP_POLICY_FILE" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link Configuration} based security {@link Policy} for Hadoop.
+
+ {@link ConfiguredPolicy} works in conjunction with a {@link PolicyProvider}
+ for providing service-level authorization for Hadoop.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.authorize.ConfiguredPolicy -->
+ <!-- start class org.apache.hadoop.security.authorize.ConnectionPermission -->
+ <class name="ConnectionPermission" extends="java.security.Permission"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ConnectionPermission" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@link ConnectionPermission} for a given service.
+ @param protocol service to be accessed]]>
+ </doc>
+ </constructor>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <method name="getActions" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="implies" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="permission" type="java.security.Permission"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[{@link Permission} to initiate a connection to a given service.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.authorize.ConnectionPermission -->
+ <!-- start class org.apache.hadoop.security.authorize.PolicyProvider -->
+ <class name="PolicyProvider" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PolicyProvider"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getServices" return="org.apache.hadoop.security.authorize.Service[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Service} definitions from the {@link PolicyProvider}.
+ @return the {@link Service} definitions]]>
+ </doc>
+ </method>
+ <field name="POLICY_PROVIDER_CONFIG" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Configuration key for the {@link PolicyProvider} implementation.]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_POLICY_PROVIDER" type="org.apache.hadoop.security.authorize.PolicyProvider"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A default {@link PolicyProvider} without any defined services.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[{@link PolicyProvider} provides the {@link Service} definitions to the
+ security {@link Policy} in effect for Hadoop.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.authorize.PolicyProvider -->
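+ <!-- Usage sketch tying PolicyProvider, ConfiguredPolicy and SecurityUtil together
+      (illustrative; MyProtocol and the "security.my.protocol.acl" key are hypothetical):
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.security.SecurityUtil;
+      import org.apache.hadoop.security.authorize.ConfiguredPolicy;
+      import org.apache.hadoop.security.authorize.PolicyProvider;
+      import org.apache.hadoop.security.authorize.Service;
+
+      interface MyProtocol {}  // hypothetical stand-in for a protected protocol
+
+      class MyPolicyProvider extends PolicyProvider {
+        public Service[] getServices() {
+          // One Service definition per protected protocol.
+          return new Service[] { new Service("security.my.protocol.acl", MyProtocol.class) };
+        }
+      }
+
+      class InstallPolicy {
+        static void install(Configuration conf) {
+          // Make the configured policy the global security policy for Hadoop.
+          SecurityUtil.setPolicy(new ConfiguredPolicy(conf, new MyPolicyProvider()));
+        }
+      }
+ -->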
+ <!-- start interface org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol -->
+ <interface name="RefreshAuthorizationPolicyProtocol" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+ <method name="refreshServiceAcl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Refresh the service-level authorization policy in effect.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="versionID" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version 1: Initial version]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Protocol used to refresh the authorization policy currently in use.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol -->
+ <!-- start class org.apache.hadoop.security.authorize.Service -->
+ <class name="Service" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Service" type="java.lang.String, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getServiceKey" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configuration key for the service.
+ @return the configuration key for the service]]>
+ </doc>
+ </method>
+ <method name="getPermission" return="java.security.Permission"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Permission} required to access the service.
+ @return the {@link Permission} required to access the service]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An abstract definition of <em>service</em> as related to
+ Service Level Authorization for Hadoop.
+
+ Each service defines its configuration key and also the necessary
+ {@link Permission} required to access the service.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.authorize.Service -->
+ <!-- start class org.apache.hadoop.security.authorize.ServiceAuthorizationManager -->
+ <class name="ServiceAuthorizationManager" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ServiceAuthorizationManager"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="authorize"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="javax.security.auth.Subject"/>
+ <param name="protocol" type="java.lang.Class"/>
+ <exception name="AuthorizationException" type="org.apache.hadoop.security.authorize.AuthorizationException"/>
+ <doc>
+ <![CDATA[Authorize the user to access the protocol being used.
+
+ @param user user accessing the service
+ @param protocol service being accessed
+ @throws AuthorizationException on authorization failure]]>
+ </doc>
+ </method>
+ <field name="SERVICE_AUTHORIZATION_CONFIG" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Configuration key for controlling service-level authorization for Hadoop.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An authorization manager which handles service-level authorization
+ for incoming service requests.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.security.authorize.ServiceAuthorizationManager -->
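+ <!-- Usage sketch for ServiceAuthorizationManager.authorize (illustrative;
+      MyProtocol is the hypothetical protocol interface from the sketch above):
+
+      import javax.security.auth.Subject;
+      import org.apache.hadoop.security.authorize.AuthorizationException;
+      import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
+
+      class AuthorizeExample {
+        static boolean mayConnect(Subject user) {
+          try {
+            ServiceAuthorizationManager.authorize(user, MyProtocol.class);
+            return true;
+          } catch (AuthorizationException e) {
+            return false;  // note: this exception withholds its stack trace
+          }
+        }
+      }
+ -->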
+</package>
+<package name="org.apache.hadoop.util">
+ <!-- start class org.apache.hadoop.util.CyclicIteration -->
+ <class name="CyclicIteration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable"/>
+ <constructor name="CyclicIteration" type="java.util.NavigableMap, java.lang.Object"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an {@link Iterable} object,
+ so that an {@link Iterator} can be created
+ for iterating over the given {@link NavigableMap}.
+ The iteration begins at the given starting key, exclusive.]]>
+ </doc>
+ </constructor>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Provide a cyclic {@link Iterator} for a {@link NavigableMap}.
+ The {@link Iterator} navigates the entries of the map
+ according to the map's ordering.
+ If the {@link Iterator} hits the last entry of the map,
+ it will then continue from the first entry.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.CyclicIteration -->
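+ <!-- Usage sketch for CyclicIteration (illustrative; it assumes the iterator yields
+      java.util.Map.Entry values, which the raw Iterable signature above leaves implicit):
+
+      import java.util.Iterator;
+      import java.util.Map;
+      import java.util.TreeMap;
+      import org.apache.hadoop.util.CyclicIteration;
+
+      class CyclicExample {
+        static void walk() {
+          TreeMap map = new TreeMap();
+          map.put("a", 1); map.put("b", 2); map.put("c", 3);
+          // Starting key "b" is exclusive, so the expected order is c, a, b.
+          for (Iterator it = new CyclicIteration(map, "b").iterator(); it.hasNext();) {
+            Map.Entry e = (Map.Entry) it.next();
+            System.out.println(e.getKey() + "=" + e.getValue());
+          }
+        }
+      }
+ -->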
+ <!-- start class org.apache.hadoop.util.Daemon -->
+ <class name="Daemon" extends="java.lang.Thread"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Daemon"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread.]]>
+ </doc>
+ </constructor>
+ <constructor name="Daemon" type="java.lang.ThreadGroup, java.lang.Runnable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a daemon thread to be part of a specified thread group.]]>
+ </doc>
+ </constructor>
+ <method name="getRunnable" return="java.lang.Runnable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A thread that has called {@link Thread#setDaemon(boolean)} with true.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Daemon -->
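+ <!-- Usage sketch for Daemon (illustrative):
+
+      import org.apache.hadoop.util.Daemon;
+
+      class DaemonExample {
+        static void start() {
+          Runnable task = new Runnable() {
+            public void run() { /* background work */ }
+          };
+          Daemon d = new Daemon(task);  // the thread is already marked as a daemon
+          d.start();                    // it will not keep the JVM alive by itself
+        }
+      }
+ -->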
+ <!-- start class org.apache.hadoop.util.DataChecksum -->
+ <class name="DataChecksum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.util.zip.Checksum"/>
+ <method name="newDataChecksum" return="org.apache.hadoop.util.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="int"/>
+ <param name="bytesPerChecksum" type="int"/>
+ </method>
+ <method name="newDataChecksum" return="org.apache.hadoop.util.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <doc>
+ <![CDATA[Creates a DataChecksum from HEADER_LEN bytes starting at bytes[offset].
+ @return a DataChecksum of the type encoded in the array, or null in case of an error.]]>
+ </doc>
+ </method>
+ <method name="newDataChecksum" return="org.apache.hadoop.util.DataChecksum"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This constructs a DataChecksum by reading HEADER_LEN bytes from the
+ input stream <i>in</i>.]]>
+ </doc>
+ </method>
+ <method name="writeHeader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the checksum header to the output stream <i>out</i>.]]>
+ </doc>
+ </method>
+ <method name="getHeader" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="writeValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutputStream"/>
+ <param name="reset" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the current checksum to the stream.
+ If <i>reset</i> is true, then resets the checksum.
+ @return number of bytes written. Will be equal to getChecksumSize().]]>
+ </doc>
+ </method>
+ <method name="writeValue" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <param name="reset" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes the current checksum to a buffer.
+ If <i>reset</i> is true, then resets the checksum.
+ @return number of bytes written. Will be equal to getChecksumSize().]]>
+ </doc>
+ </method>
+ <method name="compare" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="buf" type="byte[]"/>
+ <param name="offset" type="int"/>
+ <doc>
+ <![CDATA[Compares the checksum located at buf[offset] with the current checksum.
+ @return true if the checksum matches and false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getChecksumType" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getChecksumSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getBytesPerChecksum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumBytesInSum" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getChecksumHeaderSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getValue" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="off" type="int"/>
+ <param name="len" type="int"/>
+ </method>
+ <method name="update"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b" type="int"/>
+ </method>
+ <field name="HEADER_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_NULL" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CHECKSUM_CRC32" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SIZE_OF_INTEGER" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class provides an interface and utilities for processing checksums for
+ DFS data transfers.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.DataChecksum -->
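+ <!-- Usage sketch for DataChecksum (illustrative; the 512 bytes-per-checksum value
+      is an arbitrary choice):
+
+      import java.io.ByteArrayOutputStream;
+      import java.io.DataOutputStream;
+      import java.io.IOException;
+      import org.apache.hadoop.util.DataChecksum;
+
+      class ChecksumExample {
+        static void checksumChunk(byte[] chunk) throws IOException {
+          DataChecksum sum =
+              DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);
+          sum.update(chunk, 0, chunk.length);
+          DataOutputStream out = new DataOutputStream(new ByteArrayOutputStream());
+          sum.writeHeader(out);                      // checksum type + bytes per checksum
+          int written = sum.writeValue(out, true);   // equals getChecksumSize(); resets
+        }
+      }
+ -->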
+ <!-- start class org.apache.hadoop.util.DiskChecker -->
+ <class name="DiskChecker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mkdirsWithExistsCheck" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[The semantics of the mkdirsWithExistsCheck method differ from those of the
+ mkdirs method provided in Sun's java.io.File class in the following way:
+ while creating the non-existent parent directories, this method checks for
+ the existence of those directories if the mkdir fails at any point (since
+ that directory might have just been created by some other process).
+ If both mkdir() and the exists() check fail for any seemingly
+ non-existent directory, then we signal an error; Sun's mkdir would signal
+ an error (return false) if a directory it is attempting to create already
+ exists or the mkdir fails.
+ @param dir the directory tree to create
+ @return true on success, false on failure]]>
+ </doc>
+ </method>
+ <method name="checkDir"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
+ </method>
+ <doc>
+ <![CDATA[Class that provides utility functions for checking disk problems.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker -->
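+ <!-- Usage sketch for DiskChecker (illustrative):
+
+      import java.io.File;
+      import org.apache.hadoop.util.DiskChecker;
+      import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+
+      class DiskExample {
+        static boolean usable(File dir) {
+          if (!DiskChecker.mkdirsWithExistsCheck(dir)) {
+            return false;  // could not create the directory tree
+          }
+          try {
+            DiskChecker.checkDir(dir);  // verifies the directory can be used
+            return true;
+          } catch (DiskErrorException e) {
+            return false;
+          }
+        }
+      }
+ -->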
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <class name="DiskChecker.DiskErrorException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskErrorException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskErrorException -->
+ <!-- start class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <class name="DiskChecker.DiskOutOfSpaceException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DiskChecker.DiskOutOfSpaceException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException -->
+ <!-- start class org.apache.hadoop.util.GenericOptionsParser -->
+ <class name="GenericOptionsParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericOptionsParser" type="org.apache.commons.cli.Options, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an options parser with the given options to parse the args.
+ @param opts the options
+ @param args the command line arguments]]>
+ </doc>
+ </constructor>
+ <constructor name="GenericOptionsParser" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an options parser to parse the args.
+ @param args the command line arguments]]>
+ </doc>
+ </constructor>
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a <code>GenericOptionsParser</code> to parse only the generic Hadoop
+ arguments.
+
+ The array of string arguments other than the generic arguments can be
+ obtained by {@link #getRemainingArgs()}.
+
+ @param conf the <code>Configuration</code> to modify.
+ @param args command-line arguments.]]>
+ </doc>
+ </constructor>
+ <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, org.apache.commons.cli.Options, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a <code>GenericOptionsParser</code> to parse given options as well
+ as generic Hadoop options.
+
+ The resulting <code>CommandLine</code> object can be obtained by
+ {@link #getCommandLine()}.
+
+ @param conf the configuration to modify
+ @param options options built by the caller
+ @param args User-specified arguments]]>
+ </doc>
+ </constructor>
+ <method name="getRemainingArgs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array of Strings containing only application-specific arguments.
+
+ @return array of <code>String</code>s containing the un-parsed arguments
+ or <strong>empty array</strong> if commandLine was not defined.]]>
+ </doc>
+ </method>
+ <method name="getConfiguration" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the modified configuration
+ @return the configuration that has the modified parameters.]]>
+ </doc>
+ </method>
+ <method name="getCommandLine" return="org.apache.commons.cli.CommandLine"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the commons-cli <code>CommandLine</code> object
+ to process the parsed arguments.
+
+ Note: If the object is created with
+ {@link #GenericOptionsParser(Configuration, String[])}, then returned
+ object will only contain parsed generic options.
+
+ @return <code>CommandLine</code> representing list of arguments
+ parsed against Options descriptor.]]>
+ </doc>
+ </method>
+ <method name="getLibJars" return="java.net.URL[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[If libjars are set in the conf, parse the libjars.
+ @param conf the configuration to read the libjars from
+ @return libjar URLs
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Print the usage message for generic command-line options supported.
+
+ @param out stream to print the usage message to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>GenericOptionsParser</code> is a utility to parse command line
+ arguments generic to the Hadoop framework.
+
+ <code>GenericOptionsParser</code> recognizes several standard command
+ line arguments, enabling applications to easily specify a namenode, a
+ jobtracker, additional configuration resources etc.
+
+ <h4 id="GenericOptions">Generic Options</h4>
+
+ <p>The supported generic options are:</p>
+ <p><blockquote><pre>
+ -conf &lt;configuration file&gt; specify a configuration file
+ -D &lt;property=value&gt; use value for given property
+ -fs &lt;local|namenode:port&gt; specify a namenode
+ -jt &lt;local|jobtracker:port&gt; specify a job tracker
+ -files &lt;comma separated list of files&gt; specify comma separated
+ files to be copied to the map reduce cluster
+ -libjars &lt;comma separated list of jars&gt; specify comma separated
+ jar files to include in the classpath.
+ -archives &lt;comma separated list of archives&gt; specify comma
+ separated archives to be unarchived on the compute machines.
+
+ </pre></blockquote></p>
+
+ <p>The general command line syntax is:</p>
+ <p><tt><pre>
+ bin/hadoop command [genericOptions] [commandOptions]
+ </pre></tt></p>
+
+ <p>Generic command line arguments <strong>might</strong> modify
+ <code>Configuration</code> objects given to constructors.</p>
+
+ <p>The functionality is implemented using Commons CLI.</p>
+
+ <p>Examples:</p>
+ <p><blockquote><pre>
+ $ bin/hadoop dfs -fs darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
+ list /data directory in dfs with namenode darwin:8020
+
+ $ bin/hadoop dfs -conf hadoop-site.xml -ls /data
+ list /data directory in dfs with conf specified in hadoop-site.xml
+
+ $ bin/hadoop job -D mapred.job.tracker=darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt darwin:50020 -submit job.xml
+ submit a job to job tracker darwin:50020
+
+ $ bin/hadoop job -jt local -submit job.xml
+ submit a job to local runner
+
+ $ bin/hadoop jar -libjars testlib.jar
+ -archives test.tgz -files file.txt inputjar args
+ job submission with libjars, files and archives
+ </pre></blockquote></p>
+
+ @see Tool
+ @see ToolRunner]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericOptionsParser -->
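+ <!-- Usage sketch for GenericOptionsParser (illustrative, following the docs above):
+
+      import org.apache.hadoop.conf.Configuration;
+      import org.apache.hadoop.util.GenericOptionsParser;
+
+      class ParserExample {
+        public static void main(String[] args) {
+          Configuration conf = new Configuration();
+          // Consumes the generic options (-D, -fs, -jt, -files, -libjars, -archives)
+          // and applies them to conf; application options are left untouched.
+          GenericOptionsParser parser = new GenericOptionsParser(conf, args);
+          String[] appArgs = parser.getRemainingArgs();
+          Configuration modified = parser.getConfiguration();
+        }
+      }
+ -->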
+ <!-- start class org.apache.hadoop.util.GenericsUtil -->
+ <class name="GenericsUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="GenericsUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Returns the Class object (of type <code>Class&lt;T&gt;</code>) of the
+ argument of type <code>T</code>.
+ @param <T> The type of the argument
+ @param t the object whose class to get
+ @return <code>Class&lt;T&gt;</code>]]>
+ </doc>
+ </method>
+ <method name="toArray" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="c" type="java.lang.Class"/>
+ <param name="list" type="java.util.List"/>
+ <doc>
+ <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param c the Class object of the items in the list
+ @param list the list to convert]]>
+ </doc>
+ </method>
+ <method name="toArray" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="list" type="java.util.List"/>
+ <doc>
+ <![CDATA[Converts the given <code>List&lt;T&gt;</code> to an array of
+ <code>T[]</code>.
+ @param list the list to convert
+ @throws ArrayIndexOutOfBoundsException if the list is empty.
+ Use {@link #toArray(Class, List)} if the list may be empty.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Contains utility methods for dealing with Java Generics.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.GenericsUtil -->
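+ <!-- Usage sketch for GenericsUtil.toArray (illustrative; it assumes the erased
+      Object[] return type in the listing corresponds to a generic T[] in source):
+
+      import java.util.List;
+      import org.apache.hadoop.util.GenericsUtil;
+
+      class ToArrayExample {
+        static String[] names(List<String> list) {
+          // Passing the element class keeps this safe even for empty lists.
+          return GenericsUtil.toArray(String.class, list);
+        }
+      }
+ -->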
+ <!-- start class org.apache.hadoop.util.HeapSort -->
+ <class name="HeapSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="HeapSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using heap sort.
+ {@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of HeapSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.HeapSort -->
+ <!-- start class org.apache.hadoop.util.HostsFileReader -->
+ <class name="HostsFileReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HostsFileReader" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="refresh"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getHosts" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getExcludedHosts" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setIncludesFile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="includesFile" type="java.lang.String"/>
+ </method>
+ <method name="setExcludesFile"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="excludesFile" type="java.lang.String"/>
+ </method>
+ <method name="updateFileNames"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="includesFile" type="java.lang.String"/>
+ <param name="excludesFile" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.util.HostsFileReader -->
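+ <!-- Editor's note: an illustrative sketch of HostsFileReader, not part of the generated
+      jdiff output. The file paths are hypothetical (one host per line in each file);
+      IOException handling is omitted.
+
+        import org.apache.hadoop.util.HostsFileReader;
+
+        HostsFileReader hosts =
+            new HostsFileReader("/etc/hadoop/hosts.include", "/etc/hadoop/hosts.exclude");
+        java.util.Set<String> allowed  = hosts.getHosts();
+        java.util.Set<String> excluded = hosts.getExcludedHosts();
+        hosts.refresh();  // re-read both files after they change on disk
+ -->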
+ <!-- start interface org.apache.hadoop.util.IndexedSortable -->
+ <interface name="IndexedSortable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Compare items at the given addresses consistent with the semantics of
+ {@link java.util.Comparator#compare(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="swap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <param name="j" type="int"/>
+ <doc>
+ <![CDATA[Swap items at the given addresses.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for collections capable of being sorted by {@link IndexedSorter}
+ algorithms.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSortable -->
+ <!-- start interface org.apache.hadoop.util.IndexedSorter -->
+ <interface name="IndexedSorter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the items accessed through the given IndexedSortable over the given
+ range of logical indices. From the perspective of the sort algorithm,
+ each index between l (inclusive) and r (exclusive) is an addressable
+ entry.
+ @see IndexedSortable#compare
+ @see IndexedSortable#swap]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="l" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[Same as {@link #sort(IndexedSortable,int,int)}, but indicate progress
+ periodically.
+ @see #sort(IndexedSortable,int,int)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface for sort algorithms accepting {@link IndexedSortable} items.
+
+ A sort algorithm implementing this interface may only
+ {@link IndexedSortable#compare} and {@link IndexedSortable#swap} items
+ for a range of indices to effect a sort across that range.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.IndexedSorter -->
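+ <!-- Editor's note: a sketch of the IndexedSortable/IndexedSorter contract, not part of
+      the generated jdiff output. The sorter only ever calls compare(i, j) and swap(i, j),
+      so any indexable structure can be sorted in place; here, a plain int array.
+
+        import org.apache.hadoop.util.IndexedSortable;
+        import org.apache.hadoop.util.QuickSort;
+
+        final int[] data = { 5, 3, 9, 1 };
+        IndexedSortable sortable = new IndexedSortable() {
+          public int compare(int i, int j) { return data[i] - data[j]; }
+          public void swap(int i, int j) {
+            int tmp = data[i]; data[i] = data[j]; data[j] = tmp;
+          }
+        };
+        // Sort indices 0 (inclusive) to data.length (exclusive).
+        new QuickSort().sort(sortable, 0, data.length);
+ -->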
+ <!-- start class org.apache.hadoop.util.LineReader -->
+ <class name="LineReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LineReader" type="java.io.InputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ default buffer-size (64k).
+ @param in The input stream
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="LineReader" type="java.io.InputStream, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ given buffer-size.
+ @param in The input stream
+ @param bufferSize Size of the read buffer
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a line reader that reads from the given stream using the
+ <code>io.file.buffer.size</code> specified in the given
+ <code>Configuration</code>.
+ @param in input stream
+ @param conf configuration
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the underlying stream.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <param name="maxBytesToConsume" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read one line from the InputStream into the given Text. A line
+ can be terminated by one of the following: '\n' (LF), '\r' (CR),
+ or '\r\n' (CR+LF). EOF also terminates an otherwise unterminated
+ line.
+
+ @param str the object to store the given line (without newline)
+ @param maxLineLength the maximum number of bytes to store into str;
+ the rest of the line is silently discarded.
+ @param maxBytesToConsume the maximum number of bytes to consume
+ in this call. This is only a hint, because if the line crosses
+ this threshold, we allow it to happen. It can potentially
+ overshoot by as much as one buffer length.
+
+ @return the number of bytes read including the (longest) newline
+ found.
+
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <param name="maxLineLength" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @param maxLineLength the maximum number of bytes to store into str.
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <method name="readLine" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read from the InputStream into the given Text.
+ @param str the object to store the given line
+ @return the number of bytes read including the newline
+ @throws IOException if the underlying stream throws]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that provides a line reader from an input stream.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.LineReader -->
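+ <!-- Editor's note: an illustrative sketch of LineReader, not part of the generated jdiff
+      output. The input file name is hypothetical; IOException handling is omitted.
+
+        import java.io.FileInputStream;
+        import org.apache.hadoop.io.Text;
+        import org.apache.hadoop.util.LineReader;
+
+        LineReader reader = new LineReader(new FileInputStream("input.txt"));
+        Text line = new Text();
+        // readLine returns the number of bytes consumed, including the newline;
+        // 0 signals end of file.
+        while (reader.readLine(line) > 0) {
+          System.out.println(line);
+        }
+        reader.close();
+ -->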
+ <!-- start class org.apache.hadoop.util.LinuxMemoryCalculatorPlugin -->
+ <class name="LinuxMemoryCalculatorPlugin" extends="org.apache.hadoop.util.MemoryCalculatorPlugin"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LinuxMemoryCalculatorPlugin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPhysicalMemorySize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getVirtualMemorySize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Test the {@link LinuxMemoryCalculatorPlugin}
+
+ @param args]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Plugin to calculate virtual and physical memories on Linux systems.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.LinuxMemoryCalculatorPlugin -->
+ <!-- start class org.apache.hadoop.util.MemoryCalculatorPlugin -->
+ <class name="MemoryCalculatorPlugin" extends="org.apache.hadoop.conf.Configured"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MemoryCalculatorPlugin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVirtualMemorySize" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Obtain the total size of the virtual memory present in the system.
+
+ @return virtual memory size in bytes.]]>
+ </doc>
+ </method>
+ <method name="getPhysicalMemorySize" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Obtain the total size of the physical memory present in the system.
+
+ @return physical memory size in bytes.]]>
+ </doc>
+ </method>
+ <method name="getMemoryCalculatorPlugin" return="org.apache.hadoop.util.MemoryCalculatorPlugin"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the MemoryCalculatorPlugin from the class name and configure it. If
+ class name is null, this method will try to return a memory calculator
+ plugin available for this system.
+
+ @param clazz class-name
+ @param conf configure the plugin with this.
+ @return MemoryCalculatorPlugin]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Plugin to calculate virtual and physical memories on the system.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.MemoryCalculatorPlugin -->
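+ <!-- Editor's note: an illustrative sketch of resolving and querying a
+      MemoryCalculatorPlugin, not part of the generated jdiff output. Passing a null class
+      asks the factory method to pick a plugin available for this system; the null check
+      below assumes no plugin exists for unsupported platforms.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.util.MemoryCalculatorPlugin;
+
+        Configuration conf = new Configuration();
+        MemoryCalculatorPlugin plugin =
+            MemoryCalculatorPlugin.getMemoryCalculatorPlugin(null, conf);
+        if (plugin != null) {
+          long physical = plugin.getPhysicalMemorySize();  // bytes
+          long virtual  = plugin.getVirtualMemorySize();   // bytes
+        }
+ -->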
+ <!-- start class org.apache.hadoop.util.MergeSort -->
+ <class name="MergeSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MergeSort" type="java.util.Comparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="mergeSort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="src" type="int[]"/>
+ <param name="dest" type="int[]"/>
+ <param name="low" type="int"/>
+ <param name="high" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of MergeSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.MergeSort -->
+ <!-- start class org.apache.hadoop.util.NativeCodeLoader -->
+ <class name="NativeCodeLoader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NativeCodeLoader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="isNativeCodeLoaded" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Check if native-hadoop code is loaded for this platform.
+
+ @return <code>true</code> if native-hadoop is loaded,
+ else <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getLoadNativeLibraries" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Return whether native hadoop libraries, if present, can be used for this job.
+ @param conf configuration
+
+ @return <code>true</code> if native hadoop libraries, if present, can be
+ used for this job; <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setLoadNativeLibraries"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="loadNativeLibraries" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether native hadoop libraries, if present, can be used for this job.
+
+ @param conf configuration
+ @param loadNativeLibraries can native hadoop libraries be loaded]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A helper to load the native hadoop code i.e. libhadoop.so.
+ This handles the fallback to either the bundled libhadoop-Linux-i386-32.so
+ or the default java implementations where appropriate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.NativeCodeLoader -->
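+ <!-- Editor's note: an illustrative sketch of NativeCodeLoader, not part of the generated
+      jdiff output. The static check reports whether the native hadoop library was found
+      and loaded for this platform.
+
+        import org.apache.hadoop.util.NativeCodeLoader;
+
+        if (NativeCodeLoader.isNativeCodeLoaded()) {
+          // native implementations can be used
+        } else {
+          // fall back to the default java implementations
+        }
+ -->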
+ <!-- start class org.apache.hadoop.util.PlatformName -->
+ <class name="PlatformName" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PlatformName"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPlatformName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete platform as per the java-vm.
+ @return the complete platform as per the java-vm.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[A helper class for getting build-info of the java-vm.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PlatformName -->
+ <!-- start class org.apache.hadoop.util.PrintJarMainClass -->
+ <class name="PrintJarMainClass" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PrintJarMainClass"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param args]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A micro-application that prints the main class name out of a jar file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PrintJarMainClass -->
+ <!-- start class org.apache.hadoop.util.PriorityQueue -->
+ <class name="PriorityQueue" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="PriorityQueue"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="lessThan" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="a" type="java.lang.Object"/>
+ <param name="b" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Determines the ordering of objects in this priority queue. Subclasses
+ must define this one method.]]>
+ </doc>
+ </method>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <param name="maxSize" type="int"/>
+ <doc>
+ <![CDATA[Subclass constructors must call this.]]>
+ </doc>
+ </method>
+ <method name="put"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Adds an Object to a PriorityQueue in log(size) time.
+ If one tries to add more objects than the maxSize passed to initialize,
+ a RuntimeException (ArrayIndexOutOfBoundsException) is thrown.]]>
+ </doc>
+ </method>
+ <method name="insert" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="element" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Adds element to the PriorityQueue in log(size) time if either
+ the PriorityQueue is not full, or not lessThan(element, top()).
+ @param element
+ @return true if element is added, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="top" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the least element of the PriorityQueue in constant time.]]>
+ </doc>
+ </method>
+ <method name="pop" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes and returns the least element of the PriorityQueue in log(size)
+ time.]]>
+ </doc>
+ </method>
+ <method name="adjustTop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should be called when the Object at top changes values. Still log(n)
+ worst case, but it's at least twice as fast to <pre>
+ { pq.top().change(); pq.adjustTop(); }
+ </pre> instead of <pre>
+ { o = pq.pop(); o.change(); pq.push(o); }
+ </pre>]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of elements currently stored in the PriorityQueue.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Removes all entries from the PriorityQueue.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A PriorityQueue maintains a partial ordering of its elements such that the
+ least element can always be found in constant time. put() and pop()
+ require log(size) time.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.PriorityQueue -->
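+ <!-- Editor's note: a minimal subclass sketch of PriorityQueue, not part of the generated
+      jdiff output. Subclasses define only lessThan; initialize(maxSize) must be called
+      from the constructor, as required above. IntQueue is a hypothetical name.
+
+        import org.apache.hadoop.util.PriorityQueue;
+
+        class IntQueue extends PriorityQueue {
+          IntQueue(int maxSize) { initialize(maxSize); }
+          protected boolean lessThan(Object a, Object b) {
+            return (Integer) a < (Integer) b;
+          }
+        }
+
+        IntQueue q = new IntQueue(16);
+        q.put(7);
+        q.put(3);
+        Integer least = (Integer) q.pop();  // 3: the least element, in log(size) time
+ -->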
+ <!-- start class org.apache.hadoop.util.ProcfsBasedProcessTree -->
+ <class name="ProcfsBasedProcessTree" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ProcfsBasedProcessTree" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setSigKillInterval"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="interval" type="long"/>
+ </method>
+ <method name="isAvailable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Checks if the ProcfsBasedProcessTree is available on this system.
+
+ @return true if ProcfsBasedProcessTree is available. False otherwise.]]>
+ </doc>
+ </method>
+ <method name="getProcessTree" return="org.apache.hadoop.util.ProcfsBasedProcessTree"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the process-tree with latest state. If the root-process is not alive,
+ an empty tree will be returned.
+
+ @return the process-tree with latest state.]]>
+ </doc>
+ </method>
+ <method name="isAlive" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the process-tree alive? Currently we care only about the status of the
+ root-process.
+
+ @return true if the process-tree is alive, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="destroy"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Destroy the process-tree. Currently we only make sure the root process is
+ gone. It is the responsibility of the root process to make sure that all
+ its descendants are cleaned up.]]>
+ </doc>
+ </method>
+ <method name="getCumulativeVmem" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the cumulative virtual memory used by all the processes in the
+ process-tree.
+
+ @return cumulative virtual memory used by the process-tree in bytes.]]>
+ </doc>
+ </method>
+ <method name="getPidFromPidFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pidFileName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get PID from a pid-file.
+
+ @param pidFileName
+ Name of the pid-file.
+ @return the PID string read from the pid-file. Returns null if the
+ pidFileName points to a non-existing file or if the read from the
+ file fails.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a string listing the PIDs of the processes present in the
+ ProcfsBasedProcessTree. Output format: [pid pid ..]]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_SLEEPTIME_BEFORE_SIGKILL" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Proc file-system based ProcessTree. Works only on Linux.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ProcfsBasedProcessTree -->
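+ <!-- Editor's note: an illustrative sketch of ProcfsBasedProcessTree (Linux only), not
+      part of the generated jdiff output. The pid value is hypothetical.
+
+        import org.apache.hadoop.util.ProcfsBasedProcessTree;
+
+        if (ProcfsBasedProcessTree.isAvailable()) {
+          ProcfsBasedProcessTree tree = new ProcfsBasedProcessTree("12345");
+          tree = tree.getProcessTree();      // refresh to the latest state
+          long vmemBytes = tree.getCumulativeVmem();
+          System.out.println(tree);          // prints "[pid pid ..]"
+        }
+ -->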
+ <!-- start class org.apache.hadoop.util.ProgramDriver -->
+ <class name="ProgramDriver" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ProgramDriver"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <param name="mainClass" type="java.lang.Class"/>
+ <param name="description" type="java.lang.String"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[This is the method that adds the class to the repository
+ @param name The name you want the class instance to be called with
+ @param mainClass The class that you want to add to the repository
+ @param description The description of the class
+ @throws NoSuchMethodException
+ @throws SecurityException]]>
+ </doc>
+ </method>
+ <method name="driver"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[This is a driver for the example programs.
+ It looks at the first command line argument and tries to find an
+ example program with that name.
+ If it is found, it calls the main method in that class with the rest
+ of the command line arguments.
+ @param args The arguments from the user. args[0] is the command to run.
+ @throws NoSuchMethodException
+ @throws SecurityException
+ @throws IllegalAccessException
+ @throws IllegalArgumentException
+ @throws Throwable Anything thrown by the example program's main]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A driver that is used to run programs added to it]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ProgramDriver -->
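+ <!-- Editor's note: an illustrative sketch of ProgramDriver, not part of the generated
+      jdiff output. WordCount is a stand-in for any class with a main method.
+
+        import org.apache.hadoop.util.ProgramDriver;
+
+        public static void main(String[] args) throws Throwable {
+          ProgramDriver driver = new ProgramDriver();
+          driver.addClass("wordcount", WordCount.class,
+              "counts the words in the input files");
+          // args[0] selects the program; the rest is passed to its main method.
+          driver.driver(args);
+        }
+ -->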
+ <!-- start class org.apache.hadoop.util.Progress -->
+ <class name="Progress" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Progress"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates a new root node.]]>
+ </doc>
+ </constructor>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Adds a named node to the tree.]]>
+ </doc>
+ </method>
+ <method name="addPhase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds a node to the tree.]]>
+ </doc>
+ </method>
+ <method name="startNextPhase"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Called during execution to move to the next phase at this level in the
+ tree.]]>
+ </doc>
+ </method>
+ <method name="phase" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the currently executing sub-node.]]>
+ </doc>
+ </method>
+ <method name="complete"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Completes this node, moving the parent node to its next child.]]>
+ </doc>
+ </method>
+ <method name="set"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="progress" type="float"/>
+ <doc>
+ <![CDATA[Called during execution on a leaf node to set its progress.]]>
+ </doc>
+ </method>
+ <method name="get" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the overall progress of the root.]]>
+ </doc>
+ </method>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Utility to assist with generation of progress reports. Applications build
+ a hierarchy of {@link Progress} instances, each modelling a phase of
+ execution. The root is constructed with {@link #Progress()}. Nodes for
+ sub-phases are created by calling {@link #addPhase()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Progress -->
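+ <!-- Editor's note: an illustrative sketch of a Progress phase tree, not part of the
+      generated jdiff output. Phase names are hypothetical.
+
+        import org.apache.hadoop.util.Progress;
+
+        Progress root = new Progress();
+        Progress copy = root.addPhase("copy");
+        Progress sort = root.addPhase("sort");
+        copy.set(0.5f);               // leaf progress; root.get() now reflects it
+        copy.complete();              // marks "copy" done, moves root to "sort"
+        sort.set(0.25f);
+        float overall = root.get();   // aggregate progress across both phases
+ -->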
+ <!-- start interface org.apache.hadoop.util.Progressable -->
+ <interface name="Progressable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Report progress to the Hadoop framework.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A facility for reporting progress.
+
+ <p>Clients and/or applications can use the provided <code>Progressable</code>
+ to explicitly report progress to the Hadoop framework. This is especially
+ important for operations which take a significant amount of time, since,
+ in the absence of reported progress, the framework has to assume that an error
+ has occurred and time out the operation.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Progressable -->
+ <!-- start class org.apache.hadoop.util.QuickSort -->
+ <class name="QuickSort" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.IndexedSorter"/>
+ <constructor name="QuickSort"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getMaxDepth" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="x" type="int"/>
+ <doc>
+ <![CDATA[Deepest recursion before giving up and doing a heapsort.
+ Returns 2 * ceil(log(n)).]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <doc>
+ <![CDATA[Sort the given range of items using quick sort.
+ {@inheritDoc} If the recursion depth exceeds {@link #getMaxDepth},
+ then switch to {@link HeapSort}.]]>
+ </doc>
+ </method>
+ <method name="sort"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.util.IndexedSortable"/>
+ <param name="p" type="int"/>
+ <param name="r" type="int"/>
+ <param name="rep" type="org.apache.hadoop.util.Progressable"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An implementation of the core algorithm of QuickSort.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.QuickSort -->
+ <!-- start class org.apache.hadoop.util.ReflectionUtils -->
+ <class name="ReflectionUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReflectionUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theObject" type="java.lang.Object"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Check and set 'configuration' if necessary.
+
+ @param theObject object for which to set configuration
+ @param conf Configuration]]>
+ </doc>
+ </method>
+ <method name="newInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Create an object for the given class and initialize it from conf
+
+ @param theClass class of which an object is created
+ @param conf Configuration
+ @return a new object]]>
+ </doc>
+ </method>
+ <method name="setContentionTracing"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="boolean"/>
+ </method>
+ <method name="printThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="stream" type="java.io.PrintWriter"/>
+ <param name="title" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Print all of the threads' information and stack traces.
+
+ @param stream the stream to write to
+ @param title a string title for the stack trace]]>
+ </doc>
+ </method>
+ <method name="logThreadInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <param name="title" type="java.lang.String"/>
+ <param name="minInterval" type="long"/>
+ <doc>
+ <![CDATA[Log the current thread stacks at INFO level.
+ @param log the logger that logs the stack trace
+ @param title a descriptive title for the call stacks
+ @param minInterval the minimum time since the stacks were last logged]]>
+ </doc>
+ </method>
+ <method name="getClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Return the correctly-typed {@link Class} of the given object.
+
+ @param o object whose correctly-typed <code>Class</code> is to be obtained
+ @return the correctly typed <code>Class</code> of the given object.]]>
+ </doc>
+ </method>
+ <method name="copy" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="src" type="java.lang.Object"/>
+ <param name="dst" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Make a copy of the writable object using serialization to a buffer
+ @param src the object to copy from
+ @param dst the object to copy into, which is destroyed
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cloneWritableInto"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.io.Writable"/>
+ <param name="src" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[General reflection utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ReflectionUtils -->
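+ <!-- Editor's note: an illustrative sketch of ReflectionUtils.newInstance, not part of
+      the generated jdiff output. MyMapper is a stand-in for any class with a no-argument
+      constructor; newInstance creates the object and then initializes it from conf,
+      per setConf above.
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.util.ReflectionUtils;
+
+        Configuration conf = new Configuration();
+        MyMapper mapper = (MyMapper) ReflectionUtils.newInstance(MyMapper.class, conf);
+ -->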
+ <!-- start class org.apache.hadoop.util.RunJar -->
+ <class name="RunJar" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RunJar"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="unJar"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jarFile" type="java.io.File"/>
+ <param name="toDir" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unpack a jar file into a directory.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Throwable" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Run a Hadoop job jar. If the main class is not in the jar's manifest,
+ then it must be provided on the command line.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Run a Hadoop job jar.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.RunJar -->
+ <!-- start class org.apache.hadoop.util.ServletUtil -->
+ <class name="ServletUtil" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ServletUtil"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initHTML" return="java.io.PrintWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="response" type="javax.servlet.ServletResponse"/>
+ <param name="title" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Initial HTML header]]>
+ </doc>
+ </method>
+ <method name="getParameter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.ServletRequest"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get a parameter from a ServletRequest.
+ Return null if the parameter contains only white spaces.]]>
+ </doc>
+ </method>
+ <method name="htmlFooter" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[HTML footer to be added in the jsps.
+ @return the HTML footer.]]>
+ </doc>
+ </method>
+ <method name="percentageGraph" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="perc" type="int"/>
+ <param name="width" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generates the percentage graph and returns its HTML string
+ representation.
+
+ @param perc The percentage value for which graph is to be generated
+ @param width The width of the display table
+ @return HTML String representation of the percentage graph
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="percentageGraph" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="perc" type="float"/>
+ <param name="width" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generates the percentage graph and returns its HTML string
+ representation.
+ @param perc The percentage value for which graph is to be generated
+ @param width The width of the display table
+ @return HTML String representation of the percentage graph
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="HTML_TAIL" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.util.ServletUtil -->
+ <!-- start class org.apache.hadoop.util.Shell -->
+ <class name="Shell" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param interval the minimum duration to wait before re-executing the
+ command.]]>
+ </doc>
+ </constructor>
+ <method name="getGROUPS_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's groups list]]>
+ </doc>
+ </method>
+ <method name="getGET_PERMISSION_COMMAND" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return a Unix command to get permission information.]]>
+ </doc>
+ </method>
+ <method name="getUlimitMemoryCommand" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get the Unix command for setting the maximum virtual memory available
+ to a given child process. This is only relevant when we are forking a
+ process from within the {@link org.apache.hadoop.mapred.Mapper} or the
+ {@link org.apache.hadoop.mapred.Reducer} implementations
+ e.g. <a href="{@docRoot}/org/apache/hadoop/mapred/pipes/package-summary.html">Hadoop Pipes</a>
+ or <a href="{@docRoot}/org/apache/hadoop/streaming/package-summary.html">Hadoop Streaming</a>.
+
+ It also checks to ensure that we are running on a *nix platform; otherwise
+ (e.g. in Cygwin/Windows) it returns <code>null</code>.
+ @param conf configuration
+ @return a <code>String[]</code> with the ulimit command arguments or
+ <code>null</code> if we are running on a non *nix platform or
+ if the limit is unspecified.]]>
+ </doc>
+ </method>
+ <method name="setEnvironment"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="env" type="java.util.Map"/>
+ <doc>
+ <![CDATA[set the environment for the command
+ @param env Mapping of environment variables]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dir" type="java.io.File"/>
+ <doc>
+ <![CDATA[set the working directory
+ @param dir The directory where the command would be executed]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[check to see if a command needs to be executed and execute if needed]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[return an array containing the command name & its parameters]]>
+ </doc>
+ </method>
+ <method name="parseExecResult"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parse the execution result]]>
+ </doc>
+ </method>
+ <method name="getProcess" return="java.lang.Process"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the current sub-process executing the given command
+ @return process executing the command]]>
+ </doc>
+ </method>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[get the exit code
+ @return the exit code of the process]]>
+ </doc>
+ </method>
+ <method name="execCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Static method to execute a shell command.
+ Covers most of the simple cases without requiring the user to extend
+ the <code>Shell</code> class.
+ @param cmd shell command to execute.
+ @return the output of the executed command.]]>
+ </doc>
+ </method>
+ <method name="execCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="env" type="java.util.Map"/>
+ <param name="cmd" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Static method to execute a shell command.
+ Covers most of the simple cases without requiring the user to extend
+ the <code>Shell</code> class.
+ @param env the map of environment key=value
+ @param cmd shell command to execute.
+ @return the output of the executed command.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USER_NAME_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to get the current user's name]]>
+ </doc>
+ </field>
+ <field name="SET_PERMISSION_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set permission]]>
+ </doc>
+ </field>
+ <field name="SET_OWNER_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[a Unix command to set owner]]>
+ </doc>
+ </field>
+ <field name="SET_GROUP_COMMAND" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WINDOWS" type="boolean"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set to true on Windows platforms]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A base class for running a Unix command.
+
+ <code>Shell</code> can be used to run unix commands like <code>du</code> or
+ <code>df</code>. It also offers facilities to gate commands by
+ time-intervals.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell -->
+ <!-- start class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <class name="Shell.ExitCodeException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ExitCodeException" type="int, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExitCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This is an IOException with exit code added.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ExitCodeException -->
+ <!-- start class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
+ <class name="Shell.ShellCommandExecutor" extends="org.apache.hadoop.util.Shell"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File, java.util.Map"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="execute"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Execute the shell command.]]>
+ </doc>
+ </method>
+ <method name="getExecString" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="parseExecResult"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="lines" type="java.io.BufferedReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getOutput" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the output of the shell command.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the commands of this instance.
+ Arguments containing spaces are presented surrounded by quotes; other
+ arguments are presented raw.
+
+ @return a string representation of the object.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple shell command executor.
+
+ <code>ShellCommandExecutor</code> should be used in cases where the output
+ of the command needs no explicit parsing and where the command, working
+ directory and the environment remain unchanged. The output of the command
+ is stored as-is and is expected to be small.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.Shell.ShellCommandExecutor -->
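+ <!-- Editor's note: an illustrative sketch of Shell.ShellCommandExecutor, not part of
+      the generated jdiff output. The command is hypothetical; IOException handling and
+      the Windows case are omitted.
+
+        import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+
+        ShellCommandExecutor executor =
+            new ShellCommandExecutor(new String[] { "ls", "/tmp" });
+        executor.execute();                    // runs the command, buffering output
+        String output = executor.getOutput();  // stored as-is; expected to be small
+        int exitCode = executor.getExitCode();
+
+      For one-off commands, the static Shell.execCommand(String[]) covers the simple
+      cases without a subclass.
+ -->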
+ <!-- start class org.apache.hadoop.util.StringUtils -->
+ <class name="StringUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StringUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="stringifyException" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="e" type="java.lang.Throwable"/>
+ <doc>
+ <![CDATA[Make a string representation of the exception.
+ @param e The exception to stringify
+ @return A string with exception name and call stack.]]>
+ </doc>
+ </method>
+ <method name="simpleHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fullHostname" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a full hostname, return the word up to the first dot.
+ @param fullHostname the full hostname
+ @return the hostname to the first dot]]>
+ </doc>
+ </method>
+ <method name="humanReadableInt" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="number" type="long"/>
+ <doc>
+ <![CDATA[Given an integer, return a string that is in an approximate but
+ human-readable format.
+ It uses the bases 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3.
+ @param number the number to format
+ @return a human readable form of the integer]]>
+ </doc>
+ </method>
+ <method name="formatPercent" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="done" type="double"/>
+ <param name="digits" type="int"/>
+ <doc>
+ <![CDATA[Format a percentage for presentation to the user.
+ @param done the percentage to format (0.0 to 1.0)
+ @param digits the number of digits past the decimal point
+ @return a string representation of the percentage]]>
+ </doc>
+ </method>
+ <method name="arrayToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="strs" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Given an array of strings, return a comma-separated list of its elements.
+ @param strs Array of strings
+ @return Empty string if strs.length is 0, comma separated list of strings
+ otherwise]]>
+ </doc>
+ </method>
+ <method name="byteToHexString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <doc>
+ <![CDATA[Given an array of bytes, convert the bytes in the given range to a
+ hex string representation
+ @param bytes the byte array
+ @param start start index, inclusive
+ @param end end index, exclusive
+ @return hex string representation of the byte array]]>
+ </doc>
+ </method>
+ <method name="byteToHexString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Same as byteToHexString(bytes, 0, bytes.length).]]>
+ </doc>
+ </method>
+ <method name="hexStringToByte" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="hex" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Given a hex string, return the corresponding byte array.
+ @param hex the hex string
+ @return the byte array corresponding to the given hex string.
+ The size of the byte array is therefore hex.length/2]]>
+ </doc>
+ </method>
+ <method name="uriToString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uris" type="java.net.URI[]"/>
+ <doc>
+ <![CDATA[@param uris]]>
+ </doc>
+ </method>
+ <method name="stringToURI" return="java.net.URI[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param str the strings to convert to URIs]]>
+ </doc>
+ </method>
+ <method name="stringToPath" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[@param str the strings to convert to Paths]]>
+ </doc>
+ </method>
+ <method name="formatTimeDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+ <![CDATA[Given a finish and start time in long milliseconds, returns a
+ String in the format Xhrs, Ymins, Z sec, for the time difference between the two times.
+ If the finish time comes before the start time then negative values of X, Y and Z will be returned.
+
+ @param finishTime finish time
+ @param startTime start time]]>
+ </doc>
+ </method>
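+ <!-- Illustrative usage (a sketch; spacing follows the "Xhrs, Ymins, Z sec"
+ pattern described above):
+ long start = 0L;
+ long finish = (1 * 3600 + 2 * 60 + 5) * 1000L; // 1h 2m 5s in milliseconds
+ String diff = StringUtils.formatTimeDiff(finish, start); // "1hrs, 2mins, 5sec"
+ -->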
+ <method name="formatTime" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="timeDiff" type="long"/>
+ <doc>
+ <![CDATA[Given the time in long milliseconds, returns a
+ String in the format Xhrs, Ymins, Z sec.
+
+ @param timeDiff The time difference to format]]>
+ </doc>
+ </method>
+ <method name="getFormattedTimeWithDiff" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dateFormat" type="java.text.DateFormat"/>
+ <param name="finishTime" type="long"/>
+ <param name="startTime" type="long"/>
+ <doc>
+ <![CDATA[Formats time in ms and appends difference (finishTime - startTime)
+ as returned by formatTimeDiff().
+ If finish time is 0, an empty string is returned; if start time is 0,
+ then the difference is not appended to the return value.
+ @param dateFormat date format to use
+ @param finishTime finish time
+ @param startTime start time
+ @return formatted value.]]>
+ </doc>
+ </method>
+ <method name="getStrings" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns an arraylist of strings.
+ @param str the comma separated string values
+ @return the arraylist of the comma separated string values]]>
+ </doc>
+ </method>
+ <method name="getStringCollection" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns a collection of strings.
+ @param str comma separated string values
+ @return an <code>ArrayList</code> of string values]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Split a string using the default separator
+ @param str a string that may have escaped separators
+ @return an array of strings]]>
+ </doc>
+ </method>
+ <method name="split" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="separator" type="char"/>
+ <doc>
+ <![CDATA[Split a string using the given separator
+ @param str a string that may have escaped separators
+ @param escapeChar a char that can be used to escape the separator
+ @param separator a separator char
+ @return an array of strings]]>
+ </doc>
+ </method>
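+ <!-- Illustrative usage (a sketch; an escaped separator is kept inside its
+ token, still escaped, rather than splitting it):
+ // the Java literal "a\\,b,c" is the character sequence a\,b,c
+ String[] parts = StringUtils.split("a\\,b,c", '\\', ','); // { "a\,b", "c" }
+ -->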
+ <method name="findNext" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="separator" type="char"/>
+ <param name="escapeChar" type="char"/>
+ <param name="start" type="int"/>
+ <param name="split" type="java.lang.StringBuilder"/>
+ <doc>
+ <![CDATA[Finds the first occurrence of the separator character, ignoring escaped
+ separators, starting from the given index. Note that the substring between the
+ index and the position of the separator is passed back via the split parameter.
+ @param str the source string
+ @param separator the character to find
+ @param escapeChar character used to escape
+ @param start from where to search
+ @param split used to pass back the extracted string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Escape commas in the string using the default escape char
+ @param str a string
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Escape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the char to be escaped
+ @return an escaped string]]>
+ </doc>
+ </method>
+ <method name="escapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charsToEscape" type="char[]"/>
+ <doc>
+ <![CDATA[@param charsToEscape array of characters to be escaped]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Unescape commas in the string using the default escape char
+ @param str a string
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charToEscape" type="char"/>
+ <doc>
+ <![CDATA[Unescape <code>charToEscape</code> in the string
+ with the escape char <code>escapeChar</code>
+
+ @param str string
+ @param escapeChar escape char
+ @param charToEscape the escaped char
+ @return an unescaped string]]>
+ </doc>
+ </method>
+ <method name="unEscapeString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <param name="escapeChar" type="char"/>
+ <param name="charsToEscape" type="char[]"/>
+ <doc>
+ <![CDATA[@param charsToEscape array of characters to unescape]]>
+ </doc>
+ </method>
+ <method name="getHostname" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return hostname without throwing exception.
+ @return hostname]]>
+ </doc>
+ </method>
+ <method name="startupShutdownMessage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="clazz" type="java.lang.Class"/>
+ <param name="args" type="java.lang.String[]"/>
+ <param name="LOG" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Print a log message for starting up and shutting down
+ @param clazz the class of the server
+ @param args arguments
+ @param LOG the target log object]]>
+ </doc>
+ </method>
+ <method name="escapeHTML" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="string" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Escapes HTML special characters present in the string.
+ @param string the string to escape
+ @return HTML-escaped String representation]]>
+ </doc>
+ </method>
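+ <!-- Illustrative usage (a sketch; the escaped character set is assumed to
+ include at least &, <, and >):
+ String safe = StringUtils.escapeHTML("a < b & c"); // "a &lt; b &amp; c"
+ -->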
+ <method name="byteDesc" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="len" type="long"/>
+ <doc>
+ <![CDATA[Return an abbreviated English-language description of the byte length]]>
+ </doc>
+ </method>
+ <method name="limitDecimalTo2" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="d" type="double"/>
+ </method>
+ <field name="COMMA" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA_STR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ESCAPE_CHAR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[General string utils]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.StringUtils -->
+ <!-- start class org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix -->
+ <class name="StringUtils.TraditionalBinaryPrefix" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="symbol" type="char"/>
+ <doc>
+ <![CDATA[@return The TraditionalBinaryPrefix object corresponding to the symbol.]]>
+ </doc>
+ </method>
+ <method name="string2long" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convert a string to long.
+ The input string is first trimmed
+ and then parsed with a traditional binary prefix.
+
+ For example,
+ "-1230k" will be converted to -1230 * 1024 = -1259520;
+ "891g" will be converted to 891 * 1024^3 = 956703965184;
+
+ @param s input string
+ @return a long value represented by the input string.]]>
+ </doc>
+ </method>
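+ <!-- Illustrative usage, using the conversions quoted above:
+ long k = StringUtils.TraditionalBinaryPrefix.string2long("-1230k"); // -1259520
+ long g = StringUtils.TraditionalBinaryPrefix.string2long("891g");   // 956703965184
+ -->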
+ <field name="KILO" type="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MEGA" type="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="GIGA" type="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TERA" type="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PETA" type="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="EXA" type="org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="value" type="long"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="symbol" type="char"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The traditional binary prefixes, kilo, mega, ..., exa,
+ which can be represented by a 64-bit integer.
+ TraditionalBinaryPrefix symbols are case insensitive.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix -->
+ <!-- start interface org.apache.hadoop.util.Tool -->
+ <interface name="Tool" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Execute the command with the given arguments.
+
+ @param args command specific arguments.
+ @return exit code.
+ @throws Exception]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A tool interface that supports handling of generic command-line options.
+
+ <p><code>Tool</code> is the standard for any Map-Reduce tool/application.
+ The tool/application should delegate the handling of
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ standard command-line options</a> to {@link ToolRunner#run(Tool, String[])}
+ and only handle its custom arguments.</p>
+
+ <p>Here is how a typical <code>Tool</code> is implemented:</p>
+ <p><blockquote><pre>
+ public class MyApp extends Configured implements Tool {
+
+ public int run(String[] args) throws Exception {
+ // <code>Configuration</code> processed by <code>ToolRunner</code>
+ Configuration conf = getConf();
+
+ // Create a JobConf using the processed <code>conf</code>
+ JobConf job = new JobConf(conf, MyApp.class);
+
+ // Process custom command-line options
+ Path in = new Path(args[1]);
+ Path out = new Path(args[2]);
+
+ // Specify various job-specific parameters
+ job.setJobName("my-app");
+ job.setInputPath(in);
+ job.setOutputPath(out);
+ job.setMapperClass(MyApp.MyMapper.class);
+ job.setReducerClass(MyApp.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ return 0;
+ }
+
+ public static void main(String[] args) throws Exception {
+ // Let <code>ToolRunner</code> handle generic command-line options
+ int res = ToolRunner.run(new Configuration(), new MyApp(), args);
+
+ System.exit(res);
+ }
+ }
+ </pre></blockquote></p>
+
+ @see GenericOptionsParser
+ @see ToolRunner]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.Tool -->
+ <!-- start class org.apache.hadoop.util.ToolRunner -->
+ <class name="ToolRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ToolRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the given <code>Tool</code> by {@link Tool#run(String[])}, after
+ parsing with the given generic arguments. Uses the given
+ <code>Configuration</code>, or builds one if null.
+
+ Sets the <code>Tool</code>'s configuration with the possibly modified
+ version of the <code>conf</code>.
+
+ @param conf <code>Configuration</code> for the <code>Tool</code>.
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tool" type="org.apache.hadoop.util.Tool"/>
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Runs the <code>Tool</code> with its <code>Configuration</code>.
+
+ Equivalent to <code>run(tool.getConf(), tool, args)</code>.
+
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+ </doc>
+ </method>
+ <method name="printGenericCommandUsage"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.PrintStream"/>
+ <doc>
+ <![CDATA[Prints generic command-line arguments and usage information.
+
+ @param out stream to write usage information to.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A utility to help run {@link Tool}s.
+
+ <p><code>ToolRunner</code> can be used to run classes implementing
+ <code>Tool</code> interface. It works in conjunction with
+ {@link GenericOptionsParser} to parse the
+ <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ generic hadoop command line arguments</a> and modifies the
+ <code>Configuration</code> of the <code>Tool</code>. The
+ application-specific options are passed along without being modified.
+ </p>
+
+ @see Tool
+ @see GenericOptionsParser]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.ToolRunner -->
+ <!-- start class org.apache.hadoop.util.UTF8ByteArrayUtils -->
+ <class name="UTF8ByteArrayUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="UTF8ByteArrayUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="findByte" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <param name="b" type="byte"/>
+ <doc>
+ <![CDATA[Find the first occurrence of the given byte b in a UTF-8 encoded string
+ @param utf a byte array containing a UTF-8 encoded string
+ @param start starting offset
+ @param end ending position
+ @param b the byte to find
+ @return the position of the first occurrence of the byte, otherwise -1]]>
+ </doc>
+ </method>
+ <method name="findBytes" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <param name="b" type="byte[]"/>
+ <doc>
+ <![CDATA[Find the first occurrence of the given bytes b in a UTF-8 encoded string
+ @param utf a byte array containing a UTF-8 encoded string
+ @param start starting offset
+ @param end ending position
+ @param b the bytes to find
+ @return the position of the first occurrence of the given bytes, otherwise -1]]>
+ </doc>
+ </method>
+ <method name="findNthByte" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="b" type="byte"/>
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Find the nth occurrence of the given byte b in a UTF-8 encoded string
+ @param utf a byte array containing a UTF-8 encoded string
+ @param start starting offset
+ @param length the length of byte array
+ @param b the byte to find
+ @param n the desired occurrence of the given byte
+ @return the position of the nth occurrence of the given byte if it exists; otherwise -1]]>
+ </doc>
+ </method>
+ <method name="findNthByte" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="b" type="byte"/>
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Find the nth occurrence of the given byte b in a UTF-8 encoded string
+ @param utf a byte array containing a UTF-8 encoded string
+ @param b the byte to find
+ @param n the desired occurrence of the given byte
+ @return the position of the nth occurrence of the given byte if it exists; otherwise -1]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.util.UTF8ByteArrayUtils -->
+ <!-- start class org.apache.hadoop.util.VersionInfo -->
+ <class name="VersionInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="VersionInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the Hadoop version.
+ @return the Hadoop version string, e.g. "0.6.3-dev"]]>
+ </doc>
+ </method>
+ <method name="getRevision" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the subversion revision number for the root directory
+ @return the revision number, e.g. "451451"]]>
+ </doc>
+ </method>
+ <method name="getDate" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The date that Hadoop was compiled.
+ @return the compilation date in unix date format]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The user that compiled Hadoop.
+ @return the username of the user]]>
+ </doc>
+ </method>
+ <method name="getUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the subversion URL for the root Hadoop directory.]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the buildVersion which includes version,
+ revision, user and date.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[This class finds the package info for Hadoop and the HadoopVersionAnnotation
+ information.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.VersionInfo -->
+ <!-- start class org.apache.hadoop.util.XMLUtils -->
+ <class name="XMLUtils" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="XMLUtils"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="transform"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="styleSheet" type="java.io.InputStream"/>
+ <param name="xml" type="java.io.InputStream"/>
+ <param name="out" type="java.io.Writer"/>
+ <exception name="TransformerConfigurationException" type="javax.xml.transform.TransformerConfigurationException"/>
+ <exception name="TransformerException" type="javax.xml.transform.TransformerException"/>
+ <doc>
+ <![CDATA[Transform input xml given a stylesheet.
+
+ @param styleSheet the style-sheet
+ @param xml input xml data
+ @param out output
+ @throws TransformerConfigurationException
+ @throws TransformerException]]>
+ </doc>
+ </method>
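+ <!-- Illustrative usage (a sketch; the file names are hypothetical and the
+ streams are assumed to be opened and closed by the caller):
+ InputStream styleSheet = new FileInputStream("report.xsl");
+ InputStream xml = new FileInputStream("report.xml");
+ Writer out = new OutputStreamWriter(System.out);
+ XMLUtils.transform(styleSheet, xml, out);
+ -->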
+ <doc>
+ <![CDATA[General xml utilities.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.XMLUtils -->
+</package>
+<package name="org.apache.hadoop.util.bloom">
+ <!-- start class org.apache.hadoop.util.bloom.BloomFilter -->
+ <class name="BloomFilter" extends="org.apache.hadoop.util.bloom.Filter"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="BloomFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor - use with readFields]]>
+ </doc>
+ </constructor>
+ <constructor name="BloomFilter" type="int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param vectorSize The vector size of <i>this</i> filter.
+ @param nbHash The number of hash functions to consider.
+ @param hashType type of the hashing function (see
+ {@link org.apache.hadoop.util.hash.Hash}).]]>
+ </doc>
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ </method>
+ <method name="and"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ </method>
+ <method name="membershipTest" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ </method>
+ <method name="not"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="or"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ </method>
+ <method name="xor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getVectorSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the size of the Bloom filter]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Implements a <i>Bloom filter</i>, as defined by Bloom in 1970.
+ <p>
+ The Bloom filter is a data structure that was introduced in 1970 and that has been adopted by
+ the networking research community in the past decade thanks to the bandwidth efficiencies that it
+ offers for the transmission of set membership information between networked hosts. A sender encodes
+ the information into a bit vector, the Bloom filter, that is more compact than a conventional
+ representation. Computation and space costs for construction are linear in the number of elements.
+ The receiver uses the filter to test whether various elements are members of the set. Though the
+ filter will occasionally return a false positive, it will never return a false negative. When creating
+ the filter, the sender can choose its desired point in a trade-off between the false positive rate and the size.
+
+ <p>
+ Originally created by
+ <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
+
+ @see Filter The general behavior of a filter
+
+ @see <a href="http://portal.acm.org/citation.cfm?id=362692&dl=ACM&coll=portal">Space/Time Trade-Offs in Hash Coding with Allowable Errors</a>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.bloom.BloomFilter -->
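+ <!-- Illustrative BloomFilter usage (a sketch; the vector size, hash count and
+ Hash.MURMUR_HASH hash type are illustrative choices):
+ Filter filter = new BloomFilter(1024, 4, Hash.MURMUR_HASH);
+ filter.add(new Key("apple".getBytes()));
+ filter.membershipTest(new Key("apple".getBytes())); // true
+ filter.membershipTest(new Key("pear".getBytes()));  // usually false; false positives are possible
+ -->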
+ <!-- start class org.apache.hadoop.util.bloom.CountingBloomFilter -->
+ <class name="CountingBloomFilter" extends="org.apache.hadoop.util.bloom.Filter"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CountingBloomFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor - use with readFields]]>
+ </doc>
+ </constructor>
+ <constructor name="CountingBloomFilter" type="int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param vectorSize The vector size of <i>this</i> filter.
+ @param nbHash The number of hash functions to consider.
+ @param hashType type of the hashing function (see
+ {@link org.apache.hadoop.util.hash.Hash}).]]>
+ </doc>
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ </method>
+ <method name="delete"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ <doc>
+ <![CDATA[Removes a specified key from <i>this</i> counting Bloom filter.
+ <p>
+ <b>Invariant</b>: nothing happens if the specified key does not belong to <i>this</i> counting Bloom filter.
+ @param key The key to remove.]]>
+ </doc>
+ </method>
+ <method name="and"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ </method>
+ <method name="membershipTest" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ </method>
+ <method name="approximateCount" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ <doc>
+ <![CDATA[This method calculates an approximate count of the key, i.e. how many
+ times the key was added to the filter. This allows the filter to be
+ used as an approximate <code>key -&gt; count</code> map.
+ <p>NOTE: due to the bucket size of this filter, inserting the same
+ key more than 15 times will cause an overflow at all filter positions
+ associated with this key, and it will significantly increase the error
+ rate for this and other keys. For this reason the filter can only be
+ used to store small count values <code>0 &lt;= N &lt;&lt; 15</code>.
+ @param key key to be tested
+ @return 0 if the key is not present. Otherwise, a positive value v will
+ be returned such that <code>v == count</code> with probability equal to the
+ error rate of this filter, and <code>v &gt; count</code> otherwise.
+ Additionally, if the filter experienced an underflow as a result of
+ {@link #delete(Key)} operation, the return value may be lower than the
+ <code>count</code> with the probability of the false negative rate of such
+ filter.]]>
+ </doc>
+ </method>
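+ <!-- Illustrative approximate counting (a sketch; constructor arguments are
+ illustrative):
+ CountingBloomFilter cbf = new CountingBloomFilter(1024, 4, Hash.MURMUR_HASH);
+ Key key = new Key("x".getBytes());
+ cbf.add(key);
+ cbf.add(key);
+ int count = cbf.approximateCount(key); // approximately 2
+ cbf.delete(key);                       // decrements the counters for this key
+ -->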
+ <method name="not"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="or"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ </method>
+ <method name="xor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Implements a <i>counting Bloom filter</i>, as defined by Fan et al. in a ToN
+ 2000 paper.
+ <p>
+ A counting Bloom filter is an improvement to a standard Bloom filter as it
+ allows dynamic additions and deletions of set membership information. This
+ is achieved through the use of a counting vector instead of a bit vector.
+ <p>
+ Originally created by
+ <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
+
+ @see Filter The general behavior of a filter
+
+ @see <a href="http://portal.acm.org/citation.cfm?id=343571.343572">Summary cache: a scalable wide-area web cache sharing protocol</a>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.bloom.CountingBloomFilter -->
+ <!-- start class org.apache.hadoop.util.bloom.DynamicBloomFilter -->
+ <class name="DynamicBloomFilter" extends="org.apache.hadoop.util.bloom.Filter"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DynamicBloomFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Zero-args constructor for the serialization.]]>
+ </doc>
+ </constructor>
+ <constructor name="DynamicBloomFilter" type="int, int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.
+ <p>
+ Builds an empty Dynamic Bloom filter.
+ @param vectorSize The number of bits in the vector.
+ @param nbHash The number of hash functions to consider.
+ @param hashType type of the hashing function (see
+ {@link org.apache.hadoop.util.hash.Hash}).
+ @param nr The threshold for the maximum number of keys to record in a
+ dynamic Bloom filter row.]]>
+ </doc>
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ </method>
+ <method name="and"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ </method>
+ <method name="membershipTest" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ </method>
+ <method name="not"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="or"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ </method>
+ <method name="xor"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Implements a <i>dynamic Bloom filter</i>, as defined in the INFOCOM 2006 paper.
+ <p>
+ A dynamic Bloom filter (DBF) makes use of a <code>s * m</code> bit matrix but
+ each of the <code>s</code> rows is a standard Bloom filter. The creation
+ process of a DBF is iterative. At the start, the DBF is a <code>1 * m</code>
+ bit matrix, i.e., it is composed of a single standard Bloom filter.
+ It assumes that <code>n<sub>r</sub></code> elements are recorded in the
+ initial bit vector, where <code>n<sub>r</sub> <= n</code> (<code>n</code> is
+ the cardinality of the set <code>A</code> to record in the filter).
+ <p>
+ As the size of <code>A</code> grows during the execution of the application,
+ several keys must be inserted in the DBF. When inserting a key into the DBF,
+ one must first get an active Bloom filter in the matrix. A Bloom filter is
+ active when the number of recorded keys, <code>n<sub>r</sub></code>, is
+ strictly less than the current cardinality of <code>A</code>, <code>n</code>.
+ If an active Bloom filter is found, the key is inserted and
+ <code>n<sub>r</sub></code> is incremented by one. On the other hand, if there
+ is no active Bloom filter, a new one is created (i.e., a new row is added to
+ the matrix) according to the current size of <code>A</code> and the element
+ is added in this new Bloom filter and the <code>n<sub>r</sub></code> value of
+ this new Bloom filter is set to one. A given key is said to belong to the
+ DBF if the <code>k</code> positions are set to one in one of the matrix rows.
+ <p>
+ Originally created by
+ <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
+
+ @see Filter The general behavior of a filter
+ @see BloomFilter A Bloom filter
+
+ @see <a href="http://www.cse.fau.edu/~jie/research/publications/Publication_files/infocom2006.pdf">Theory and Network Applications of Dynamic Bloom Filters</a>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.bloom.DynamicBloomFilter -->
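+ <!-- Illustrative DynamicBloomFilter construction (a sketch; a new row is added
+ automatically once the active filter has recorded nr keys):
+ // 1024-bit rows, 4 hash functions, at most nr = 100 keys per row
+ Filter dbf = new DynamicBloomFilter(1024, 4, Hash.MURMUR_HASH, 100);
+ dbf.add(new Key("k1".getBytes()));
+ -->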
+ <!-- start class org.apache.hadoop.util.bloom.Filter -->
+ <class name="Filter" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="Filter"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Filter" type="int, int, int"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.
+ @param vectorSize The vector size of <i>this</i> filter.
+ @param nbHash The number of hash functions to consider.
+ @param hashType type of the hashing function (see {@link Hash}).]]>
+ </doc>
+ </constructor>
+ <method name="add"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ <doc>
+ <![CDATA[Adds a key to <i>this</i> filter.
+ @param key The key to add.]]>
+ </doc>
+ </method>
+ <method name="membershipTest" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ <doc>
+ <![CDATA[Determines whether a specified key belongs to <i>this</i> filter.
+ @param key The key to test.
+ @return boolean True if the specified key belongs to <i>this</i> filter.
+ False otherwise.]]>
+ </doc>
+ </method>
+ <method name="and"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ <doc>
+ <![CDATA[Performs a logical AND between <i>this</i> filter and a specified filter.
+ <p>
+ <b>Invariant</b>: The result is assigned to <i>this</i> filter.
+ @param filter The filter to AND with.]]>
+ </doc>
+ </method>
+ <method name="or"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ <doc>
+ <![CDATA[Performs a logical OR between <i>this</i> filter and a specified filter.
+ <p>
+ <b>Invariant</b>: The result is assigned to <i>this</i> filter.
+ @param filter The filter to OR with.]]>
+ </doc>
+ </method>
+ <method name="xor"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+ <doc>
+ <![CDATA[Performs a logical XOR between <i>this</i> filter and a specified filter.
+ <p>
+ <b>Invariant</b>: The result is assigned to <i>this</i> filter.
+ @param filter The filter to XOR with.]]>
+ </doc>
+ </method>
+ <method name="not"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Performs a logical NOT on <i>this</i> filter.
+ <p>
+ The result is assigned to <i>this</i> filter.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keys" type="java.util.List"/>
+ <doc>
+ <![CDATA[Adds a list of keys to <i>this</i> filter.
+ @param keys The list of keys.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keys" type="java.util.Collection"/>
+ <doc>
+ <![CDATA[Adds a collection of keys to <i>this</i> filter.
+ @param keys The collection of keys.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keys" type="org.apache.hadoop.util.bloom.Key[]"/>
+ <doc>
+ <![CDATA[Adds an array of keys to <i>this</i> filter.
+ @param keys The array of keys.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="vectorSize" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The vector size of <i>this</i> filter.]]>
+ </doc>
+ </field>
+ <field name="hash" type="org.apache.hadoop.util.bloom.HashFunction"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The hash function used to map a key to several positions in the vector.]]>
+ </doc>
+ </field>
+ <field name="nbHash" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of hash functions to consider.]]>
+ </doc>
+ </field>
+ <field name="hashType" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Type of hashing function to use.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Defines the general behavior of a filter.
+ <p>
+ A filter is a data structure which aims at offering a lossy summary of a set <code>A</code>. The
+ key idea is to map entries of <code>A</code> (also called <i>keys</i>) into several positions
+ in a vector through the use of several hash functions.
+ <p>
+ Typically, a filter will be implemented as a Bloom filter (or a Bloom filter extension).
+ <p>
+ It must be extended in order to define the real behavior.
+
+ @see Key The general behavior of a key
+ @see HashFunction A hash function]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.bloom.Filter -->
+ <!-- start class org.apache.hadoop.util.bloom.HashFunction -->
+ <class name="HashFunction" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HashFunction" type="int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.
+ <p>
+ Builds a hash function that must obey a given maximum number of returned values and a highest value.
+ @param maxValue The highest value that may be returned.
+ @param nbHash The number of resulting hashed values.
+ @param hashType type of the hashing function (see {@link Hash}).]]>
+ </doc>
+ </constructor>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Clears <i>this</i> hash function. A no-op.]]>
+ </doc>
+ </method>
+ <method name="hash" return="int[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="k" type="org.apache.hadoop.util.bloom.Key"/>
+ <doc>
+ <![CDATA[Hashes a specified key into several integers.
+ @param k The specified key.
+ @return The array of hashed values.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implements a hash object that returns a certain number of hashed values.
+
+ @see Key The general behavior of a key being stored in a filter
+ @see Filter The general behavior of a filter]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.bloom.HashFunction -->
+ <!-- start class org.apache.hadoop.util.bloom.Key -->
+ <class name="Key" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="Key"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor - use with readFields]]>
+ </doc>
+ </constructor>
+ <constructor name="Key" type="byte[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.
+ <p>
+ Builds a key with a default weight.
+ @param value The byte value of <i>this</i> key.]]>
+ </doc>
+ </constructor>
+ <constructor name="Key" type="byte[], double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.
+ <p>
+ Builds a key with a specified weight.
+ @param value The value of <i>this</i> key.
+ @param weight The weight associated to <i>this</i> key.]]>
+ </doc>
+ </constructor>
+ <method name="set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="byte[]"/>
+ <param name="weight" type="double"/>
+ <doc>
+ <![CDATA[@param value The value of <i>this</i> key.
+ @param weight The weight associated to <i>this</i> key.]]>
+ </doc>
+ </method>
+ <method name="getBytes" return="byte[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return byte[] The value of <i>this</i> key.]]>
+ </doc>
+ </method>
+ <method name="getWeight" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The weight associated to <i>this</i> key.]]>
+ </doc>
+ </method>
+ <method name="incrementWeight"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="weight" type="double"/>
+ <doc>
+ <![CDATA[Increments the weight of <i>this</i> key with a specified value.
+ @param weight The increment.]]>
+ </doc>
+ </method>
+ <method name="incrementWeight"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Increments the weight of <i>this</i> key by one.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.util.bloom.Key"/>
+ </method>
+ <doc>
+ <![CDATA[The general behavior of a key that must be stored in a filter.
+
+ @see Filter The general behavior of a filter]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.bloom.Key -->
+ <!-- start interface org.apache.hadoop.util.bloom.RemoveScheme -->
+ <interface name="RemoveScheme" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="RANDOM" type="short"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Random selection.
+ <p>
+ The idea is to randomly select a bit to reset.]]>
+ </doc>
+ </field>
+ <field name="MINIMUM_FN" type="short"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[MinimumFN Selection.
+ <p>
+ The idea is to select the bit to reset that will generate the minimum
+ number of false negatives.]]>
+ </doc>
+ </field>
+ <field name="MAXIMUM_FP" type="short"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[MaximumFP Selection.
+ <p>
+ The idea is to select the bit to reset that will remove the maximum number
+ of false positives.]]>
+ </doc>
+ </field>
+ <field name="RATIO" type="short"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Ratio Selection.
+ <p>
+ The idea is to select the bit to reset that will, at the same time, remove
+ the maximum number of false positives while minimizing the number of false
+ negatives generated.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Defines the different remove schemes for retouched Bloom filters.
+ <p>
+ Originally created by
+ <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.util.bloom.RemoveScheme -->
+ <!-- start class org.apache.hadoop.util.bloom.RetouchedBloomFilter -->
+ <class name="RetouchedBloomFilter" extends="org.apache.hadoop.util.bloom.BloomFilter"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.bloom.RemoveScheme"/>
+ <constructor name="RetouchedBloomFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor - use with readFields]]>
+ </doc>
+ </constructor>
+ <constructor name="RetouchedBloomFilter" type="int, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor
+ @param vectorSize The vector size of <i>this</i> filter.
+ @param nbHash The number of hash functions to consider.
+ @param hashType type of the hashing function (see
+ {@link org.apache.hadoop.util.hash.Hash}).]]>
+ </doc>
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ </method>
+ <method name="addFalsePositive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+ <doc>
+ <![CDATA[Adds false positive information to <i>this</i> retouched Bloom filter.
+ <p>
+ <b>Invariant</b>: if the false positive is <code>null</code>, nothing happens.
+ @param key The false positive key to add.]]>
+ </doc>
+ </method>
+ <method name="addFalsePositive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="coll" type="java.util.Collection"/>
+ <doc>
+ <![CDATA[Adds a collection of false positive information to <i>this</i> retouched Bloom filter.
+ @param coll The collection of false positives.]]>
+ </doc>
+ </method>
+ <method name="addFalsePositive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keys" type="java.util.List"/>
+ <doc>
+ <![CDATA[Adds a list of false positive information to <i>this</i> retouched Bloom filter.
+ @param keys The list of false positives.]]>
+ </doc>
+ </method>
+ <method name="addFalsePositive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keys" type="org.apache.hadoop.util.bloom.Key[]"/>
+ <doc>
+ <![CDATA[Adds an array of false positive information to <i>this</i> retouched Bloom filter.
+ @param keys The array of false positives.]]>
+ </doc>
+ </method>
+ <method name="selectiveClearing"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="k" type="org.apache.hadoop.util.bloom.Key"/>
+ <param name="scheme" type="short"/>
+ <doc>
+ <![CDATA[Performs the selective clearing for a given key.
+ @param k The false positive key to remove from <i>this</i> retouched Bloom filter.
+ @param scheme The selective clearing scheme to apply.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Implements a <i>retouched Bloom filter</i>, as defined in the CoNEXT 2006 paper.
+ <p>
+ It allows the removal of selected false positives at the cost of introducing
+ random false negatives, and with the benefit of eliminating some random false
+ positives at the same time.
+
+ <p>
+ Originally created by
+ <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
+
+ @see Filter The general behavior of a filter
+ @see BloomFilter A Bloom filter
+ @see RemoveScheme The different selective clearing algorithms
+
+ @see <a href="http://www-rp.lip6.fr/site_npa/site_rp/_publications/740-rbf_cameraready.pdf">Retouched Bloom Filters: Allowing Networked Applications to Trade Off Selected False Positives Against False Negatives</a>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.bloom.RetouchedBloomFilter -->
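+ <!-- A minimal usage sketch of the retouched Bloom filter API described above;
+ the vector size, hash count, and key bytes below are arbitrary placeholders:
+
+ import org.apache.hadoop.util.bloom.Key;
+ import org.apache.hadoop.util.bloom.RemoveScheme;
+ import org.apache.hadoop.util.bloom.RetouchedBloomFilter;
+ import org.apache.hadoop.util.hash.Hash;
+
+ // 1024-bit vector, 4 hash functions, MurmurHash
+ RetouchedBloomFilter filter = new RetouchedBloomFilter(1024, 4, Hash.MURMUR_HASH);
+ Key member = new Key("member".getBytes());
+ filter.add(member);
+
+ // Record a known false positive, then selectively clear it using the RATIO
+ // scheme, which trades removed false positives against new false negatives.
+ Key ghost = new Key("ghost".getBytes());
+ filter.addFalsePositive(ghost);
+ filter.selectiveClearing(ghost, RemoveScheme.RATIO);
+ -->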
+</package>
+<package name="org.apache.hadoop.util.hash">
+ <!-- start class org.apache.hadoop.util.hash.Hash -->
+ <class name="Hash" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Hash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseHashType" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[This utility method converts String representation of hash function name
+ to a symbolic constant. Currently two function types are supported,
+ "jenkins" and "murmur".
+ @param name hash function name
+ @return one of the predefined constants]]>
+ </doc>
+ </method>
+ <method name="getHashType" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[This utility method converts the name of the configured
+ hash type to a symbolic constant.
+ @param conf configuration
+ @return one of the predefined constants]]>
+ </doc>
+ </method>
+ <method name="getInstance" return="org.apache.hadoop.util.hash.Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="int"/>
+ <doc>
+ <![CDATA[Get a singleton instance of hash function of a given type.
+ @param type predefined hash type
+ @return hash function instance, or null if type is invalid]]>
+ </doc>
+ </method>
+ <method name="getInstance" return="org.apache.hadoop.util.hash.Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[Get a singleton instance of hash function of a type
+ defined in the configuration.
+ @param conf current configuration
+ @return hash function instance, or null if the configured type is invalid]]>
+ </doc>
+ </method>
+ <method name="hash" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <doc>
+ <![CDATA[Calculate a hash using all bytes from the input argument, and
+ a seed of -1.
+ @param bytes input bytes
+ @return hash value]]>
+ </doc>
+ </method>
+ <method name="hash" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="initval" type="int"/>
+ <doc>
+ <![CDATA[Calculate a hash using all bytes from the input argument,
+ and a provided seed value.
+ @param bytes input bytes
+ @param initval seed value
+ @return hash value]]>
+ </doc>
+ </method>
+ <method name="hash" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="bytes" type="byte[]"/>
+ <param name="length" type="int"/>
+ <param name="initval" type="int"/>
+ <doc>
+ <![CDATA[Calculate a hash using bytes from 0 to <code>length</code>, and
+ the provided seed value.
+ @param bytes input bytes
+ @param length length of the valid bytes to consider
+ @param initval seed value
+ @return hash value]]>
+ </doc>
+ </method>
+ <field name="INVALID_HASH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constant to denote invalid hash type.]]>
+ </doc>
+ </field>
+ <field name="JENKINS_HASH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constant to denote {@link JenkinsHash}.]]>
+ </doc>
+ </field>
+ <field name="MURMUR_HASH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constant to denote {@link MurmurHash}.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[This class represents a common API for hashing functions.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.hash.Hash -->
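+ <!-- A short sketch of the Hash API above: resolving a hash type by name,
+ fetching the singleton instance, and using the three hash() overloads.
+ The input bytes and seed are arbitrary placeholders.
+
+ import org.apache.hadoop.util.hash.Hash;
+
+ int type = Hash.parseHashType("murmur");   // JENKINS_HASH, MURMUR_HASH or INVALID_HASH
+ Hash h = Hash.getInstance(type);           // null if the type is invalid
+ byte[] data = "example".getBytes();
+ int h1 = h.hash(data);                     // all bytes, implicit seed of -1
+ int h2 = h.hash(data, 42);                 // all bytes, explicit seed
+ int h3 = h.hash(data, data.length, 42);    // explicit length and seed
+ -->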
+ <!-- start class org.apache.hadoop.util.hash.JenkinsHash -->
+ <class name="JenkinsHash" extends="org.apache.hadoop.util.hash.Hash"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JenkinsHash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getInstance" return="org.apache.hadoop.util.hash.Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hash" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="byte[]"/>
+ <param name="nbytes" type="int"/>
+ <param name="initval" type="int"/>
+ <doc>
+ <![CDATA[taken from hashlittle() -- hash a variable-length key into a 32-bit value
+
+ @param key the key (the unaligned variable-length array of bytes)
+ @param nbytes number of bytes to include in hash
+ @param initval can be any integer value
+ @return a 32-bit value. Every bit of the key affects every bit of the
+ return value. Two keys differing by one or two bits will have totally
+ different hash values.
+
+ <p>The best hash table sizes are powers of 2. There is no need to do mod
+ a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask.
+ For example, if you need only 10 bits, do
+ <code>h = (h & hashmask(10));</code>
+ In which case, the hash table should have hashsize(10) elements.
+
+ <p>If you are hashing n strings byte[][] k, do it like this:
+ for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h);
+
+ <p>By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
+ code any way you wish, private, educational, or commercial. It's free.
+
+ <p>Use for hash table lookup, or anything where one collision in 2^^32 is
+ acceptable. Do NOT use for cryptographic purposes.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Compute the hash of the specified file
+ @param args name of file to compute hash of.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Produces 32-bit hash for hash table lookup.
+
+ <pre>lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+
+ You can use this free for any purpose. It's in the public domain.
+ It has no warranty.
+ </pre>
+
+ @see <a href="http://burtleburtle.net/bob/c/lookup3.c">lookup3.c</a>
+ @see <a href="http://www.ddj.com/184410284">Hash Functions (and how this
+ function compares to others such as CRC, MD?, etc)</a>
+ @see <a href="http://burtleburtle.net/bob/hash/doobs.html">Update to the
+ Dr. Dobbs article</a>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.hash.JenkinsHash -->
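+ <!-- The hashlittle() documentation above suggests chaining the previous hash
+ value in as the seed when hashing a sequence of byte arrays; a minimal sketch,
+ with placeholder inputs:
+
+ import org.apache.hadoop.util.hash.Hash;
+ import org.apache.hadoop.util.hash.JenkinsHash;
+
+ Hash jenkins = JenkinsHash.getInstance();
+ byte[][] k = { "a".getBytes(), "b".getBytes(), "c".getBytes() };
+ int h = 0;
+ for (int i = 0; i < k.length; i++) {
+   h = jenkins.hash(k[i], h);   // previous hash feeds in as the seed
+ }
+ -->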
+ <!-- start class org.apache.hadoop.util.hash.MurmurHash -->
+ <class name="MurmurHash" extends="org.apache.hadoop.util.hash.Hash"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MurmurHash"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getInstance" return="org.apache.hadoop.util.hash.Hash"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hash" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="data" type="byte[]"/>
+ <param name="length" type="int"/>
+ <param name="seed" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[This is a very fast, non-cryptographic hash suitable for general hash-based
+ lookup. See http://murmurhash.googlepages.com/ for more details.
+
+ <p>The C version of MurmurHash 2.0 found at that site was ported
+ to Java by Andrzej Bialecki (ab at getopt org).</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.util.hash.MurmurHash -->
+</package>
+<package name="org.apache.hadoop.mapred">
+ <!-- start class org.apache.hadoop.mapred.ClusterStatus -->
+ <class name="ClusterStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of task trackers in the cluster.
+
+ @return the number of task trackers in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getActiveTrackerNames" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the names of active task trackers in the cluster.
+
+ @return the active task trackers in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getBlacklistedTrackerNames" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the names of blacklisted task trackers in the cluster.
+
+ @return the blacklisted task trackers in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getBlacklistedTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of blacklisted task trackers in the cluster.
+
+ @return the number of blacklisted task trackers in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getTTExpiryInterval" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the tasktracker expiry interval for the cluster.
+ @return the expiry interval in msec]]>
+ </doc>
+ </method>
+ <method name="getMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running map tasks in the cluster.
+
+ @return the number of currently running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of currently running reduce tasks in the cluster.
+
+ @return the number of currently running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running map tasks in the cluster.
+
+ @return the maximum capacity for running map tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum capacity for running reduce tasks in the cluster.
+
+ @return the maximum capacity for running reduce tasks in the cluster.]]>
+ </doc>
+ </method>
+ <method name="getJobTrackerState" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current state of the <code>JobTracker</code>,
+ as {@link JobTracker.State}
+
+ @return the current state of the <code>JobTracker</code>.]]>
+ </doc>
+ </method>
+ <method name="getUsedMemory" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the total heap memory used by the <code>JobTracker</code>
+
+ @return the size of heap memory used by the <code>JobTracker</code>]]>
+ </doc>
+ </method>
+ <method name="getMaxMemory" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum configured heap memory that can be used by the <code>JobTracker</code>
+
+ @return the configured size of max heap memory that can be used by the <code>JobTracker</code>]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Status information on the current state of the Map-Reduce cluster.
+
+ <p><code>ClusterStatus</code> provides clients with information such as:
+ <ol>
+ <li>
+ Size of the cluster.
+ </li>
+ <li>
+ Name of the trackers.
+ </li>
+ <li>
+ Task capacity of the cluster.
+ </li>
+ <li>
+ The number of currently running map & reduce tasks.
+ </li>
+ <li>
+ State of the <code>JobTracker</code>.
+ </li>
+ </ol></p>
+
+ <p>Clients can query for the latest <code>ClusterStatus</code>, via
+ {@link JobClient#getClusterStatus()}.</p>
+
+ @see JobClient]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ClusterStatus -->
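+ <!-- A sketch of querying ClusterStatus through JobClient, as the class
+ documentation above suggests; the printed fields are a small selection:
+
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.ClusterStatus;
+ import org.apache.hadoop.mapred.JobClient;
+ import org.apache.hadoop.mapred.JobConf;
+
+ JobConf conf = new JobConf();
+ JobClient client = new JobClient(conf);            // throws IOException
+ ClusterStatus status = client.getClusterStatus();
+ System.out.println("trackers: " + status.getTaskTrackers()
+     + ", running maps: " + status.getMapTasks()
+     + "/" + status.getMaxMapTasks()
+     + ", running reduces: " + status.getReduceTasks()
+     + "/" + status.getMaxReduceTasks());
+ -->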
+ <!-- start class org.apache.hadoop.mapred.Counters -->
+ <class name="Counters" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.Counters} instead.">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable"/>
+ <constructor name="Counters"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getGroupNames" return="java.util.Collection"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all counter groups, i.e. the counter enum classes.
+ @return Set of group names.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getGroup" return="org.apache.hadoop.mapred.Counters.Group"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="groupName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the named counter group, or an empty group if there is none
+ with the specified name.]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Find the counter for the given enum. The same enum will always return the
+ same counter.
+ @param key the counter key
+ @return the matching counter object]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Find a counter given the group and the name.
+ @param group the name of the group
+ @param name the internal name of the counter
+ @return the counter for that name]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <param name="group" type="java.lang.String"/>
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Find a counter by using strings
+ @param group the name of the group
+ @param id the id of the counter within the group (0 to N-1)
+ @param name the internal name of the counter
+ @return the counter for that name
+ @deprecated]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param key identifies a counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param group the name of the group
+ @param counter the internal name of the counter
+ @param amount amount by which counter is to be incremented]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Returns current value of the specified counter, or 0 if the counter
+ does not exist.]]>
+ </doc>
+ </method>
+ <method name="incrAllCounters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+ </doc>
+ </method>
+ <method name="sum" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="a" type="org.apache.hadoop.mapred.Counters"/>
+ <param name="b" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Convenience method for computing the sum of two sets of counters.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of counters, by summing the number of counters
+ in each group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the set of groups.
+ The external format is:
+ #groups (groupName group)*
+
+ i.e. the number of groups followed by 0 or more groups, where each
+ group is of the form:
+
+ groupDisplayName #counters (false | true counter)*
+
+ where each counter is of the form:
+
+ name (false | true displayName) value]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a set of groups.]]>
+ </doc>
+ </method>
+ <method name="log"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="log" type="org.apache.commons.logging.Log"/>
+ <doc>
+ <![CDATA[Logs the current counter values.
+ @param log The log to use.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return textual representation of the counter values.]]>
+ </doc>
+ </method>
+ <method name="makeCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert a counters object into a single line that is easy to parse.
+ @return the string with "name=value" for each counter and separated by ","]]>
+ </doc>
+ </method>
+ <method name="makeEscapedCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Represent the counter in a textual format that can be converted back to
+ its object form
+ @return the string in the following format
+ {(groupname)(group-displayname)[(countername)(displayname)(value)][][]}{}{}]]>
+ </doc>
+ </method>
+ <method name="fromEscapedCompactString" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compactString" type="java.lang.String"/>
+ <exception name="ParseException" type="java.text.ParseException"/>
+ <doc>
+ <![CDATA[Convert a stringified counters representation back into a Counters object.
+ Note that the counters can only be recovered if they were stringified using
+ {@link #makeEscapedCompactString()}.
+ @return a Counters object]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ </method>
+ <doc>
+ <![CDATA[A set of named counters.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> can be of
+ any {@link Enum} type.</p>
+
+ <p><code>Counters</code> are bunched into {@link Group}s, each comprising
+ counters from a particular <code>Enum</code> class.
+ @deprecated Use {@link org.apache.hadoop.mapreduce.Counters} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters -->
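+ <!-- A sketch of the Counters API above, used standalone; inside a running job,
+ counters are normally incremented through the Reporter passed to map/reduce
+ tasks. MyCounter and the group/counter names here are hypothetical:
+
+ import org.apache.hadoop.mapred.Counters;
+
+ enum MyCounter { RECORDS_READ }   // declared at class level in real code
+
+ Counters counters = new Counters();
+ counters.incrCounter(MyCounter.RECORDS_READ, 1);        // enum-keyed counter
+ counters.incrCounter("MyGroup", "records", 5);          // string-keyed counter
+ long n = counters.getCounter(MyCounter.RECORDS_READ);   // 1
+ Counters.Counter c = counters.findCounter("MyGroup", "records");
+ long m = c.getCounter();                                // 5
+ -->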
+ <!-- start class org.apache.hadoop.mapred.Counters.Counter -->
+ <class name="Counters.Counter" extends="org.apache.hadoop.mapreduce.Counter"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="setDisplayName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newName" type="java.lang.String"/>
+ </method>
+ <method name="makeEscapedCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compact stringified version of the counter in the format
+ [(actual-name)(display-name)(value)]]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[What is the current value of this counter?
+ @return the current value]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A counter record, comprising its name and value.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Counter -->
+ <!-- start class org.apache.hadoop.mapred.Counters.Group -->
+ <class name="Counters.Group" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable"/>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the raw name of the group. This is the name of the enum class
+ for this group of counters.]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the localized name of the group. This is the same as getName() by
+ default, but different if an appropriate ResourceBundle is found.]]>
+ </doc>
+ </method>
+ <method name="setDisplayName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="displayName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the display name]]>
+ </doc>
+ </method>
+ <method name="makeEscapedCompactString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the compact stringified version of the group in the format
+ {(actual-name)(display-name)(value)[][][]} where [] are compact strings for the
+ counters within.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="obj" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Checks for (content) equality of Groups]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="counterName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value of the specified counter, or 0 if the counter does
+ not exist.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getCounter(String)} instead">
+ <param name="id" type="int"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given id and create it if it doesn't exist.
+ @param id the numeric id of the counter within the group
+ @param name the internal counter name
+ @return the counter
+ @deprecated use {@link #getCounter(String)} instead]]>
+ </doc>
+ </method>
+ <method name="getCounterForName" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the counter for the given name and create it if it doesn't exist.
+ @param name the internal counter name
+ @return the counter]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of counters in this group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[<code>Group</code> of counters, comprising counters from a particular
+ counter {@link Enum} class.
+
+ <p><code>Group</code> handles localization of the class name and the
+ counter names.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.Counters.Group -->
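+ <!-- Both Counters and Counters.Group implement Iterable, so the whole counter
+ tree can be walked with nested for-each loops. A minimal sketch, given a
+ Counters instance named counters, and assuming the Iterable type parameters
+ are Counters.Group and Counters.Counter (the generics-erased listing above
+ does not show them):
+
+ for (Counters.Group group : counters) {
+   System.out.println(group.getDisplayName() + " (" + group.size() + " counters)");
+   for (Counters.Counter counter : group) {
+     System.out.println("  " + counter.getDisplayName() + " = " + counter.getCounter());
+   }
+ }
+ -->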
+ <!-- start class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
+ <class name="DefaultJobHistoryParser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="DefaultJobHistoryParser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="parseJobTasks"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobHistoryFile" type="java.lang.String"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobHistory.JobInfo"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Populates a JobInfo object from the job's history log file.
+ @param jobHistoryFile history file for this job.
+ @param job a precreated JobInfo object, should be non-null.
+ @param fs FileSystem where the history file is present.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Default parser for job history files. It creates an object model from
+ the job history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.DefaultJobHistoryParser -->
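+ <!-- A sketch of parseJobTasks(); the history file path is a placeholder, and
+ JobHistory.JobInfo is assumed to expose a public constructor taking a job id:
+
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.mapred.DefaultJobHistoryParser;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.JobHistory;
+
+ JobConf conf = new JobConf();
+ FileSystem fs = FileSystem.getLocal(conf);            // FS holding the history file
+ JobHistory.JobInfo job = new JobHistory.JobInfo("");  // id is filled in from the log
+ DefaultJobHistoryParser.parseJobTasks("/logs/history/jobfile", job, fs);
+ -->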
+ <!-- start class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <class name="FileAlreadyExistsException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileAlreadyExistsException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="FileAlreadyExistsException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Used when the target file already exists for any operation and
+ is not configured to be overwritten.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+ <!-- start class org.apache.hadoop.mapred.FileInputFormat -->
+ <class name="FileInputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}
+ instead.">
+ <implements name="org.apache.hadoop.mapred.InputFormat"/>
+ <constructor name="FileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setMinSplitSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="minSplitSize" type="long"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="filename" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Is the given filename splitable? Usually true, but if the file is
+ stream compressed, it will not be.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split-up
+ so that {@link Mapper}s process entire files.
+
+ @param fs the file system that the file is on
+ @param filename the file name to check
+ @return is this file splitable?]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setInputPathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="filter" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+
+ @param filter the PathFilter class to use for filtering the input paths.]]>
+ </doc>
+ </method>
+ <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get a PathFilter instance of the filter set for the input paths.
+
+ @return the PathFilter instance set for the job, or null if none has been set.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression.
+
+ @param job the job to list input paths for
+ @return array of FileStatus objects
+ @throws IOException if no input paths are specified.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Splits files returned by {@link #listStatus(JobConf)} when
+ they're too big.]]>
+ </doc>
+ </method>
+ <method name="computeSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="goalSize" type="long"/>
+ <param name="minSize" type="long"/>
+ <param name="blockSize" type="long"/>
+ </method>
+ <method name="getBlockIndex" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+ <param name="offset" type="long"/>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the given comma separated paths as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be set as
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Add the given comma separated paths to the list of inputs for
+ the map-reduce job.
+
+ @param conf The configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be added to
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+
+ @param conf Configuration of the job.
+ @param inputPaths the {@link Path}s of the input directories/files
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @param conf The configuration of the job
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getSplitHosts" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+ <param name="offset" type="long"/>
+ <param name="splitSize" type="long"/>
+ <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This function identifies and returns the hosts that contribute
+ most for a given split. For calculating the contribution, rack
+ locality is treated on par with host locality, so hosts from racks
+ that contribute the most are preferred over hosts on racks that
+ contribute less.
+ @param blkLocations The list of block locations
+ @param offset the offset of the split within the file
+ @param splitSize the size of the split
+ @return array of hosts that contribute most to this split
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A base class for file-based {@link InputFormat}.
+
+ <p><code>FileInputFormat</code> is the base class for all file-based
+ <code>InputFormat</code>s. This provides a generic implementation of
+ {@link #getSplits(JobConf, int)}.
+ Subclasses of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(FileSystem, Path)} method to ensure input-files are
+ not split-up and are processed as a whole by {@link Mapper}s.
+ @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}
+ instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileInputFormat -->
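+ <!-- A sketch of the static input-path helpers above; the paths and the
+ TextInputFormat choice are placeholders:
+
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.mapred.FileInputFormat;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.TextInputFormat;
+
+ JobConf conf = new JobConf();
+ conf.setInputFormat(TextInputFormat.class);
+ FileInputFormat.setInputPaths(conf, new Path("/data/a"), new Path("/data/b"));
+ FileInputFormat.addInputPath(conf, new Path("/data/c"));
+ Path[] inputs = FileInputFormat.getInputPaths(conf);  // the three paths above
+ -->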
+ <!-- start class org.apache.hadoop.mapred.FileOutputCommitter -->
+ <class name="FileOutputCommitter" extends="org.apache.hadoop.mapred.OutputCommitter"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileOutputCommitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setupJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cleanupJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setupTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="commitTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="abortTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="needsTaskCommit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TEMP_DIR_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Temporary directory name]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An {@link OutputCommitter} that commits files specified
+ in the job output directory, i.e. ${mapred.output.dir}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileOutputCommitter -->
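+ <!-- FileOutputCommitter is the stock committer for file-based output; a
+ minimal sketch of selecting it explicitly, assuming JobConf exposes a
+ setOutputCommitter method in this release:
+
+ import org.apache.hadoop.mapred.FileOutputCommitter;
+ import org.apache.hadoop.mapred.JobConf;
+
+ JobConf conf = new JobConf();
+ conf.setOutputCommitter(FileOutputCommitter.class);
+ -->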
+ <!-- start class org.apache.hadoop.mapred.FileOutputFormat -->
+ <class name="FileOutputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat"/>
+ <constructor name="FileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="codecClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="defaultValue" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param conf The configuration of the job.
+ @param outputDir the {@link Path} of the output directory for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(JobConf)]]>
+ </doc>
+ </method>
+ <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the task's temporary output directory
+ for the map-reduce job.
+
+ <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
+
+ <p><i>Note:</i> The following is valid only if the {@link OutputCommitter}
+ is {@link FileOutputCommitter}. If <code>OutputCommitter</code> is not
+ a <code>FileOutputCommitter</code>, the task's temporary output
+ directory is same as {@link #getOutputPath(JobConf)} i.e.
+ <tt>${mapred.output.dir}</tt></p>
+
+ <p>Some applications need to create/write-to side-files, which differ from
+ the actual job-outputs.
+
+ <p>In such cases there could be issues with 2 instances of the same TIP
+ (running simultaneously e.g. speculative tasks) trying to open/write-to the
+ same file (path) on HDFS. Hence the application-writer will have to pick
+ unique names per task-attempt (e.g. using the attemptid, say
+ <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
+
+ <p>To get around this the Map-Reduce framework helps the application-writer
+ out by maintaining a special
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>
+ sub-directory for each task-attempt on HDFS where the output of the
+ task-attempt goes. On successful completion of the task-attempt the files
+ in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only)
+ are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the
+ framework discards the sub-directory of unsuccessful task-attempts. This
+ is completely transparent to the application.</p>
+
+ <p>The application-writer can take advantage of this by creating any
+ side-files required in <tt>${mapred.work.output.dir}</tt> during execution
+ of the task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the
+ framework will move them out similarly; thus the application-writer doesn't
+ have to pick unique paths per task-attempt.</p>
+
+ <p><i>Note</i>: the value of <tt>${mapred.work.output.dir}</tt> during
+ execution of a particular task-attempt is actually
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>, and this value is
+ set by the map-reduce framework. So, just create any side-files in the
+ path returned by {@link #getWorkOutputPath(JobConf)} from map/reduce
+ task to take advantage of this feature.</p>
+
+ <p>The entire discussion holds true for maps of jobs with
+ reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
+ goes directly to HDFS.</p>
+
+ @return the {@link Path} to the task's temporary output directory
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to create the task's temporary output directory and
+ return the path to the task's output file.
+
+ @param conf job-configuration
+ @param name temporary task-output filename
+ @return path to the task's temporary output file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUniqueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Helper function to generate a name that is unique for the task.
+
+ <p>The generated name can be used to create custom files from within the
+ different tasks for the job, the names for different tasks will not collide
+ with each other.</p>
+
+ <p>The given name is postfixed with the task type, 'm' for maps, 'r' for
+ reduces, and the task partition number. For example, given the name 'test'
+ and the first map of the job, the generated name will be
+ 'test-m-00000'.</p>
+
+ @param conf the configuration for the job.
+ @param name the name to make unique.
+ @return a unique name across all tasks of the job.]]>
+ </doc>
+ </method>
+ <method name="getPathForCustomFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Helper function to generate a {@link Path} for a file that is unique for
+ the task within the job output directory.
+
+ <p>The path can be used to create custom files from within the map and
+ reduce tasks. The path name will be unique for each task. The path parent
+ will be the job output directory.</p>
+
+ <p>This method uses the {@link #getUniqueName} method to make the file name
+ unique for the task.</p>
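+
+ <p>A minimal sketch (the file name is illustrative):</p>
+ <blockquote><pre>
+ Path custom = FileOutputFormat.getPathForCustomFile(job, "metrics");
+ FSDataOutputStream out = custom.getFileSystem(job).create(custom);
+ </pre></blockquote>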
+
+ @param conf the configuration for the job.
+ @param name the name for the file.
+ @return a unique path across all tasks of the job.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base class for {@link OutputFormat}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.FileSplit -->
+ <class name="FileSplit" extends="org.apache.hadoop.mapreduce.InputSplit"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.lib.input.FileSplit}
+ instead.">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="deprecated, no comment">
+ <doc>
+ <![CDATA[Constructs a split.
+ @deprecated
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process]]>
+ </doc>
+ </constructor>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+ </doc>
+ </constructor>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file containing this split's data.]]>
+ </doc>
+ </method>
+ <method name="getStart" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The position of the first byte in the file to process.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the file to process.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A section of an input file. Returned by {@link
+ InputFormat#getSplits(JobConf, int)} and passed to
+ {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.
+ @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.FileSplit}
+ instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.FileSplit -->
+ <!-- start class org.apache.hadoop.mapred.ID -->
+ <class name="ID" extends="org.apache.hadoop.mapreduce.ID"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ID" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[constructs an ID object from the given int]]>
+ </doc>
+ </constructor>
+ <constructor name="ID"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A general identifier, which internally stores the id
+ as an integer. This is the super class of {@link JobID},
+ {@link TaskID} and {@link TaskAttemptID}.
+
+ @see JobID
+ @see TaskID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.ID -->
+ <!-- start interface org.apache.hadoop.mapred.InputFormat -->
+ <interface name="InputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.InputFormat} instead.">
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically split the set of input files for the job.
+
+ <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
+ for processing.</p>
+
+ <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
+ input files are not physically split into chunks. For example, a split
+ could be an <i>&lt;input-file-path, start, offset&gt;</i> tuple.</p>
+
+ @param job job configuration.
+ @param numSplits the desired number of splits, a hint.
+ @return an array of {@link InputSplit}s for the job.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordReader} for the given {@link InputSplit}.
+
+ <p>It is the responsibility of the <code>RecordReader</code> to respect
+ record boundaries while processing the logical split to present a
+ record-oriented view to the individual task.</p>
+
+ @param split the {@link InputSplit}
+ @param job the job that this split belongs to
+ @return a {@link RecordReader}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputFormat</code> describes the input-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the input-specification of the job.
+ </li>
+ <li>
+ Split-up the input file(s) into logical {@link InputSplit}s, each of
+ which is then assigned to an individual {@link Mapper}.
+ </li>
+ <li>
+ Provide the {@link RecordReader} implementation to be used to glean
+ input records from the logical <code>InputSplit</code> for processing by
+ the {@link Mapper}.
+ </li>
+ </ol>
+
+ <p>The default behavior of file-based {@link InputFormat}s, typically
+ sub-classes of {@link FileInputFormat}, is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of the input files. However, the {@link FileSystem} blocksize of
+ the input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../mapred-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
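+
+ <p>For example, a sketch of raising that lower bound (the job class and
+ the 64 MB figure are illustrative):</p>
+ <blockquote><pre>
+ JobConf job = new JobConf(MyJob.class);
+ job.setLong("mapred.min.split.size", 64 * 1024 * 1024);
+ </pre></blockquote>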
+
+ <p>Clearly, logical splits based on input size are insufficient for many
+ applications since record boundaries must be respected. In such cases, the
+ application also has to implement a {@link RecordReader}, which has the
+ responsibility to respect record boundaries and present a record-oriented
+ view of the logical <code>InputSplit</code> to the individual task.</p>
+
+ @see InputSplit
+ @see RecordReader
+ @see JobClient
+ @see FileInputFormat
+ @deprecated Use {@link org.apache.hadoop.mapreduce.InputFormat} instead.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.InputSplit -->
+ <interface name="InputSplit" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.InputSplit} instead.">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the total number of bytes in the data of the <code>InputSplit</code>.
+
+ @return the number of bytes in the input split.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the list of hostnames where the input split is located.
+
+ @return list of hostnames where data of the <code>InputSplit</code> is
+ located as an array of <code>String</code>s.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputSplit</code> represents the data to be processed by an
+ individual {@link Mapper}.
+
+ <p>Typically, it presents a byte-oriented view of the input, and it is the
+ responsibility of the {@link RecordReader} of the job to process this and
+ present a record-oriented view.</p>
+
+ @see InputFormat
+ @see RecordReader
+ @deprecated Use {@link org.apache.hadoop.mapreduce.InputSplit} instead.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.InputSplit -->
+ <!-- start class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <class name="InvalidFileTypeException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidFileTypeException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidFileTypeException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Used when the file type differs from the desired file type,
+ e.g. when a file is found where a directory is expected.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidFileTypeException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidInputException -->
+ <class name="InvalidInputException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidInputException" type="java.util.List"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create the exception with the given list.
+ @param probs the list of problems to report. This list is not copied.]]>
+ </doc>
+ </constructor>
+ <method name="getProblems" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete list of the problems reported.
+ @return the list of problems, which must not be modified]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get a summary message of the problems found.
+ @return the concatenated messages from all of the problems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class wraps a list of problems with the input, so that the user
+ can get a list of problems together instead of finding and fixing them one
+ by one.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidInputException -->
+ <!-- start class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <class name="InvalidJobConfException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidJobConfException"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="InvalidJobConfException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[This exception is thrown when the jobconf is missing some mandatory
+ attributes, or when the value of some attributes is invalid.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.InvalidJobConfException -->
+ <!-- start class org.apache.hadoop.mapred.IsolationRunner -->
+ <class name="IsolationRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IsolationRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Run a single task
+ @param args the first argument is the task directory]]>
+ </doc>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.IsolationRunner -->
+ <!-- start class org.apache.hadoop.mapred.JobClient -->
+ <class name="JobClient" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="JobClient"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job client.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client with the given {@link JobConf}, and connect to the
+ default {@link JobTracker}.
+
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <constructor name="JobClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a job client, connect to the indicated job tracker.
+
+ @param jobTrackAddr the job tracker to connect to.
+ @param conf configuration.]]>
+ </doc>
+ </constructor>
+ <method name="init"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Connect to the default {@link JobTracker}.
+ @param conf the job configuration.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the <code>JobClient</code>.]]>
+ </doc>
+ </method>
+ <method name="getFs" return="org.apache.hadoop.fs.FileSystem"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a filesystem handle. We need this to prepare jobs
+ for submission to the MapReduce system.
+
+ @return the filesystem handle.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobFile" type="java.lang.String"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param jobFile the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the MR system.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param job the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws FileNotFoundException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="submitJobInternal" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Internal method for submitting jobs to the system.
+ @param job the configuration to submit
+ @return a proxy object for the running job
+ @throws FileNotFoundException
+ @throws ClassNotFoundException
+ @throws InterruptedException
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isJobDirValid" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobDirPath" type="org.apache.hadoop.fs.Path"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Checks if the job directory is clean and has all the required
+ components for (re)starting the job.]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get a {@link RunningJob} object to track an ongoing job. Returns
+ null if the id does not correspond to any known job.
+
+ @param jobid the jobid of the job.
+ @return the {@link RunningJob} handle to track the job, null if the
+ <code>jobid</code> doesn't correspond to any known job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getJob(JobID)}.">
+ <param name="jobid" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getJob(JobID)}.]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the map tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the map tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getMapTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getMapTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the reduce tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the reduce tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCleanupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the cleanup tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the cleanup tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getSetupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the information of the current state of the setup tasks of a job.
+
+ @param jobId the job to query.
+ @return the list of all of the setup tips.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #getReduceTaskReports(JobID)}">
+ <param name="jobId" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #getReduceTaskReports(JobID)}]]>
+ </doc>
+ </method>
+ <method name="displayTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="type" type="java.lang.String"/>
+ <param name="state" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Display the information about a job's tasks, of a particular type and
+ in a particular state
+
+ @param jobId the ID of the job
+ @param type the type of the task (map/reduce/setup/cleanup)
+ @param state the state of the task
+ (pending/running/completed/failed/killed)]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the Map-Reduce cluster.
+
+ @return the status information about the Map-Reduce cluster as an object
+ of {@link ClusterStatus}.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="detailed" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the Map-Reduce cluster.
+
+ @param detailed if true then get a detailed status including the
+ tracker names
+ @return the status information about the Map-Reduce cluster as an object
+ of {@link ClusterStatus}.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are not completed and not failed.
+
+ @return array of {@link JobStatus} for the running/to-be-run jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the jobs that are submitted.
+
+ @return array of {@link JobStatus} for the submitted jobs.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Utility that submits a job, then polls for progress until the job is
+ complete.
+
+ @param job the job configuration.
+ @throws IOException if the job fails]]>
+ </doc>
+ </method>
+ <method name="monitorAndPrintJob" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="job" type="org.apache.hadoop.mapred.RunningJob"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Monitor a job and print status in real-time as progress is made and tasks
+ fail.
+ @param conf the job's configuration
+ @param job the job to track
+ @return true if the job succeeded
+ @throws IOException if communication to the JobTracker fails]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Sets the output filter for tasks. Only those tasks whose output
+ matches the filter are printed.
+ @param newValue task filter.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the task output filter out of the JobConf.
+
+ @param job the JobConf to examine.
+ @return the filter level.]]>
+ </doc>
+ </method>
+ <method name="setTaskOutputFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+ <doc>
+ <![CDATA[Modify the JobConf to set the task output filter.
+
+ @param job the JobConf to modify.
+ @param newValue the value to set.]]>
+ </doc>
+ </method>
+ <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns task output filter.
+ @return task filter.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getDefaultMaps" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Maps in the cluster.
+
+ @return the max available Maps in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getDefaultReduces" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get status information about the max available Reduces in the cluster.
+
+ @return the max available Reduces in the cluster
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Grab the jobtracker system directory path where job-specific files are to be placed.
+
+ @return the system directory where job-specific files are to be placed.]]>
+ </doc>
+ </method>
+ <method name="getQueues" return="org.apache.hadoop.mapred.JobQueueInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return an array of queue information objects about all the Job Queues
+ configured.
+
+ @return Array of JobQueueInfo objects
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJobsFromQueue" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets all the jobs which were added to a particular Job Queue.
+
+ @param queueName name of the Job Queue
+ @return Array of jobs present in the job queue
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getQueueInfo" return="org.apache.hadoop.mapred.JobQueueInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the queue information associated with a particular Job Queue.
+
+ @param queueName name of the job queue.
+ @return Queue information associated to particular queue.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[<code>JobClient</code> is the primary interface for the user-job to interact
+ with the {@link JobTracker}.
+
+ <code>JobClient</code> provides facilities to submit jobs, track their
+ progress, access component-tasks' reports/logs, get the Map-Reduce cluster
+ status information etc.
+
+ <p>The job submission process involves:
+ <ol>
+ <li>
+ Checking the input and output specifications of the job.
+ </li>
+ <li>
+ Computing the {@link InputSplit}s for the job.
+ </li>
+ <li>
+ Setup the requisite accounting information for the {@link DistributedCache}
+ of the job, if necessary.
+ </li>
+ <li>
+ Copying the job's jar and configuration to the map-reduce system directory
+ on the distributed file-system.
+ </li>
+ <li>
+ Submitting the job to the <code>JobTracker</code> and optionally monitoring
+ its status.
+ </li>
+ </ol></p>
+
+ Normally the user creates the application, describes various facets of the
+ job via {@link JobConf} and then uses the <code>JobClient</code> to submit
+ the job and monitor its progress.
+
+ <p>Here is an example on how to use <code>JobClient</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ job.setInputPath(new Path("in"));
+ job.setOutputPath(new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ // Submit the job, then poll for progress until the job is complete
+ JobClient.runJob(job);
+ </pre></blockquote></p>
+
+ <h4 id="JobControl">Job Control</h4>
+
+ <p>At times clients need to chain map-reduce jobs to accomplish complex
+ tasks which cannot be done via a single map-reduce job. This is fairly easy
+ since the output of a job typically goes to the distributed file-system,
+ and that output can be used as the input for the next job.</p>
+
+ <p>However, this also means that the onus of ensuring jobs are complete
+ (success/failure) lies squarely on the clients. In such situations the
+ various job-control options are:
+ <ol>
+ <li>
+ {@link #runJob(JobConf)} : submits the job and returns only after
+ the job has completed.
+ </li>
+ <li>
+ {@link #submitJob(JobConf)} : only submits the job; the client then
+ polls the returned {@link RunningJob} handle to query status and make
+ scheduling decisions (see the sketch below).
+ </li>
+ <li>
+ {@link JobConf#setJobEndNotificationURI(String)} : setup a notification
+ on job-completion, thus avoiding polling.
+ </li>
+ </ol></p>
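+
+ <p>A minimal sketch of the polling approach (exception handling omitted;
+ the poll interval is illustrative):</p>
+ <blockquote><pre>
+ JobClient jc = new JobClient(job);
+ RunningJob rj = jc.submitJob(job);
+ while (!rj.isComplete()) {
+   Thread.sleep(5000);
+ }
+ if (!rj.isSuccessful()) {
+   // handle job failure
+ }
+ </pre></blockquote>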
+
+ @see JobConf
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient -->
+ <!-- start class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <class name="JobClient.TaskStatusFilter" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="NONE" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="KILLED" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SUCCEEDED" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ALL" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
+ <!-- start class org.apache.hadoop.mapred.JobConf -->
+ <class name="JobConf" extends="org.apache.hadoop.conf.Configuration"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link Configuration} instead">
+ <constructor name="JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce job configuration.
+
+ @param conf a Configuration whose settings will be inherited.
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="org.apache.hadoop.fs.Path"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobConf" type="boolean"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A new map/reduce configuration where the behavior of reading from the
+ default resources can be turned off.
+ <p/>
+ If the parameter {@code loadDefaults} is false, the new instance
+ will not load resources from the default files.
+
+ @param loadDefaults specifies whether to load from the default files]]>
+ </doc>
+ </constructor>
+ <method name="getJar" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user jar for the map-reduce job.
+
+ @return the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJar"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jar" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user jar for the map-reduce job.
+
+ @param jar the user jar for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setJarByClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the job's jar file by finding an example class location.
+
+ @param cls the example class.]]>
+ </doc>
+ </method>
+ <method name="getLocalDirs" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="deleteLocalFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="subdir" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pathString" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Constructs a local file name. Files are distributed among configured
+ local directories.]]>
+ </doc>
+ </method>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reported username for this job.
+
+ @return the username]]>
+ </doc>
+ </method>
+ <method name="setUser"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="user" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the reported username for this job.
+
+ @param user the username for this job.]]>
+ </doc>
+ </method>
+ <method name="setKeepFailedTaskFiles"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the framework should keep the intermediate files for
+ failed tasks.
+
+ @param keep <code>true</code> if framework should keep the intermediate files
+ for failed tasks, <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="getKeepFailedTaskFiles" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should the temporary files for failed tasks be kept?
+
+ @return should the files be kept?]]>
+ </doc>
+ </method>
+ <method name="setKeepTaskFilesPattern"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pattern" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set a regular expression for task names that should be kept.
+ The regular expression ".*_m_000123_0" would keep the files
+ for the first instance of map 123 that ran.
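+
+ <p>For example:</p>
+ <blockquote><pre>
+ job.setKeepTaskFilesPattern(".*_m_000123_0");
+ </pre></blockquote>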
+
+ @param pattern the java.util.regex.Pattern to match against the
+ task names.]]>
+ </doc>
+ </method>
+ <method name="getKeepTaskFilesPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the regular expression that is matched against the task names
+ to see if we need to keep the files.
+
+ @return the pattern as a string, if it was set, otherwise null.]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the current working directory for the default file system.
+
+ @param dir the new current working directory.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the current working directory for the default file system.
+
+ @return the directory name.]]>
+ </doc>
+ </method>
+ <method name="setNumTasksToExecutePerJvm"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="numTasks" type="int"/>
+ <doc>
+ <![CDATA[Sets the number of tasks that a spawned task JVM should run
+ before it exits
+ @param numTasks the number of tasks to execute; defaults to 1;
+ -1 signifies no limit]]>
+ </doc>
+ </method>
+ <method name="getNumTasksToExecutePerJvm" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the number of tasks that a spawned JVM should execute]]>
+ </doc>
+ </method>
+ <method name="getInputFormat" return="org.apache.hadoop.mapred.InputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link InputFormat} implementation for the map-reduce job,
+ defaults to {@link TextInputFormat} if not specified explicitly.
+
+ @return the {@link InputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the {@link InputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link InputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="getOutputFormat" return="org.apache.hadoop.mapred.OutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link OutputFormat} implementation for the map-reduce job,
+ defaults to {@link TextOutputFormat} if not specified explicitly.
+
+ @return the {@link OutputFormat} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getOutputCommitter" return="org.apache.hadoop.mapred.OutputCommitter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link OutputCommitter} implementation for the map-reduce job,
+ defaults to {@link FileOutputCommitter} if not specified explicitly.
+
+ @return the {@link OutputCommitter} implementation for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setOutputCommitter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the {@link OutputCommitter} implementation for the map-reduce job.
+
+ @param theClass the {@link OutputCommitter} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="setOutputFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the {@link OutputFormat} implementation for the map-reduce job.
+
+ @param theClass the {@link OutputFormat} implementation for the map-reduce
+ job.]]>
+ </doc>
+ </method>
+ <method name="setCompressMapOutput"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Should the map outputs be compressed before transfer?
+ Uses the SequenceFile compression.
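+
+ <p>A minimal sketch (the choice of codec is illustrative):</p>
+ <blockquote><pre>
+ job.setCompressMapOutput(true);
+ job.setMapOutputCompressorClass(
+     org.apache.hadoop.io.compress.GzipCodec.class);
+ </pre></blockquote>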
+
+ @param compress should the map outputs be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressMapOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Are the outputs of the maps to be compressed?
+
+ @return <code>true</code> if the outputs of the maps are to be compressed,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="codecClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the given class as the {@link CompressionCodec} for the map outputs.
+
+ @param codecClass the {@link CompressionCodec} class that will compress
+ the map outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputCompressorClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="defaultValue" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the map outputs.
+
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} class that should be used to compress the
+ map outputs.
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getMapOutputKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the map output data. If it is not set, use the
+ (final) output key class. This allows the map output key class to be
+ different than the final output key class.
+
+ @return the map output key class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the key class for the map output data. This allows the user to
+ specify the map output key class to be different than the final output
+ key class.
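+
+ <p>A minimal sketch (the <tt>org.apache.hadoop.io</tt> types are
+ illustrative):</p>
+ <blockquote><pre>
+ job.setMapOutputKeyClass(Text.class);     // intermediate map-output keys
+ job.setOutputKeyClass(IntWritable.class); // final job-output keys
+ </pre></blockquote>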
+
+ @param theClass the map output key class.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for the map output data. If it is not set, use the
+ (final) output value class. This allows the map output value class to be
+ different than the final output value class.
+
+ @return the map output value class.]]>
+ </doc>
+ </method>
+ <method name="setMapOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the value class for the map output data. This allows the user to
+ specify the map output value class to be different than the final output
+ value class.
+
+ @param theClass the map output value class.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the job output data.
+
+ @return the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the key class for the job output data.
+
+ @param theClass the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link RawComparator} comparator used to compare keys.
+
+ @return the {@link RawComparator} comparator used to compare keys.]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyComparatorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the {@link RawComparator} comparator used to compare keys.
+
+ @param theClass the {@link RawComparator} comparator used to
+ compare keys.
+ @see #setOutputValueGroupingComparator(Class)]]>
+ </doc>
+ </method>
+ <method name="setKeyFieldComparatorOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keySpec" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the {@link KeyFieldBasedComparator} options used to compare keys.
+
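+ <p>For example, a sketch using the option format described below, to sort
+ the second key field numerically and in reverse:</p>
+ <blockquote><pre>
+ job.setKeyFieldComparatorOptions("-k2,2nr");
+ </pre></blockquote>
+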
+ @param keySpec the key specification of the form -k pos1[,pos2], where
+ pos is of the form f[.c][opts], where f is the number
+ of the key field to use, and c is the number of the first character from
+ the beginning of the field. Fields and character positions are numbered
+ starting with 1; a character position of zero in pos2 indicates the
+ field's last character. If '.c' is omitted from pos1, it defaults to 1
+ (the beginning of the field); if omitted from pos2, it defaults to 0
+ (the end of the field). opts are ordering options. The supported options
+ are:
+ -n, (Sort numerically)
+ -r, (Reverse the result of comparison)]]>
+ </doc>
+ </method>
+ <method name="getKeyFieldComparatorOption" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link KeyFieldBasedComparator} options]]>
+ </doc>
+ </method>
+ <method name="setKeyFieldPartitionerOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="keySpec" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the {@link KeyFieldBasedPartitioner} options used for
+ {@link Partitioner}
+
+ @param keySpec the key specification of the form -k pos1[,pos2], where
+ pos is of the form f[.c][opts], where f is the number
+ of the key field to use, and c is the number of the first character from
+ the beginning of the field. Fields and character positions are numbered
+ starting with 1; a character position of zero in pos2 indicates the
+ field's last character. If '.c' is omitted from pos1, it defaults to 1
+ (the beginning of the field); if omitted from pos2, it defaults to 0
+ (the end of the field).]]>
+ </doc>
+ </method>
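+ <!-- A companion sketch for the partitioner keySpec: partition on the first key
+ field only, so all records that share field 1 reach the same reducer. Assumes
+ the same hypothetical job object as the comparator sketch above.
+
+ job.setPartitionerClass(org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner.class);
+ job.setKeyFieldPartitionerOptions("-k1,1"); // partition on fields 1 through 1
+ -->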
+ <method name="getKeyFieldPartitionerOption" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link KeyFieldBasedPartitioner} options]]>
+ </doc>
+ </method>
+ <method name="getOutputValueGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user defined {@link WritableComparable} comparator for
+ grouping keys of inputs to the reduce.
+
+ @return comparator set by the user for grouping values.
+ @see #setOutputValueGroupingComparator(Class) for details.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueGroupingComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the user defined {@link RawComparator} comparator for
+ grouping keys in the input to the reduce.
+
+ <p>This comparator should be provided if the equivalence rules for keys
+ for sorting the intermediates are different from those for grouping keys
+ before each call to
+ {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
+
+ <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
+ in a single call to the reduce function if K1 and K2 compare as equal.</p>
+
+ <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
+ how keys are sorted, this can be used in conjunction to simulate
+ <i>secondary sort on values</i>.</p>
+
+ <p><i>Note</i>: This is not a guarantee of the reduce sort being
+ <i>stable</i> in any sense. (In any case, with the order of available
+ map-outputs to the reduce being non-deterministic, it wouldn't make
+ that much sense.)</p>
+
+ @param theClass the comparator class to be used for grouping keys.
+ It should implement <code>RawComparator</code>.
+ @see #setOutputKeyComparatorClass(Class)]]>
+ </doc>
+ </method>
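+ <!-- A sketch of the secondary-sort pattern described above. Both comparator
+ classes are hypothetical placeholders: FullKeyComparator orders by the natural
+ key and then by the value part embedded in the key, while GroupComparator
+ compares the natural key only, so each reduce() call sees one natural key's
+ values in sorted order.
+
+ job.setOutputKeyComparatorClass(FullKeyComparator.class); // full sort order of intermediates
+ job.setOutputValueGroupingComparator(GroupComparator.class); // grouping for reduce() calls
+ -->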
+ <method name="getUseNewMapper" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should the framework use the new context-object code for running
+ the mapper?
+ @return true if the new API should be used]]>
+ </doc>
+ </method>
+ <method name="setUseNewMapper"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="flag" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the framework should use the new API for the mapper.
+ This is the default for jobs submitted with the new Job API.
+ @param flag true if the new API should be used]]>
+ </doc>
+ </method>
+ <method name="getUseNewReducer" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should the framework use the new context-object code for running
+ the reducer?
+ @return true if the new API should be used]]>
+ </doc>
+ </method>
+ <method name="setUseNewReducer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="flag" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the framework should use the new API for the reducer.
+ This is the default for jobs submitted with the new Job API.
+ @param flag true if the new API should be used]]>
+ </doc>
+ </method>
+ <method name="getOutputValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for job outputs.
+
+ @return the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="setOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the value class for job outputs.
+
+ @param theClass the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapperClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Mapper} class for the job.
+
+ @return the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapperClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the {@link Mapper} class for the job.
+
+ @param theClass the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getMapRunnerClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link MapRunnable} class for the job.
+
+ @return the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setMapRunnerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Expert: Set the {@link MapRunnable} class for the job.
+
+ Typically used to exert greater control on {@link Mapper}s.
+
+ @param theClass the {@link MapRunnable} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getPartitionerClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Partitioner} used to partition {@link Mapper}-outputs
+ to be sent to the {@link Reducer}s.
+
+ @return the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setPartitionerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the {@link Partitioner} class used to partition
+ {@link Mapper}-outputs to be sent to the {@link Reducer}s.
+
+ @param theClass the {@link Partitioner} used to partition map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getReducerClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link Reducer} class for the job.
+
+ @return the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="setReducerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the {@link Reducer} class for the job.
+
+ @param theClass the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getCombinerClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers. Typically the combiner is the same as
+ the {@link Reducer} for the job, i.e. {@link #getReducerClass()}.
+
+ @return the user-defined combiner class used to combine map-outputs.]]>
+ </doc>
+ </method>
+ <method name="setCombinerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the user-defined <i>combiner</i> class used to combine map-outputs
+ before being sent to the reducers.
+
+ <p>The combiner is an application-specified aggregation operation, which
+ can help cut down the amount of data transferred between the
+ {@link Mapper} and the {@link Reducer}, leading to better performance.</p>
+
+ <p>The framework may invoke the combiner 0, 1, or multiple times, in both
+ the mapper and reducer tasks. In general, the combiner is called as the
+ sort/merge result is written to disk. The combiner must:
+ <ul>
+ <li> be side-effect free</li>
+ <li> have the same input and output key types and the same input and
+ output value types</li>
+ </ul></p>
+
+ <p>Typically the combiner is the same as the <code>Reducer</code> for the
+ job, i.e. {@link #setReducerClass(Class)}.</p>
+
+ @param theClass the user-defined combiner class used to combine
+ map-outputs.]]>
+ </doc>
+ </method>
+ <method name="getSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be used for this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on, else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getMapSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for map tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be
+ used for this job for map tasks,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setMapSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for map tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for map tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getReduceSpeculativeExecution" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Should speculative execution be used for this job for reduce tasks?
+ Defaults to <code>true</code>.
+
+ @return <code>true</code> if speculative execution should be used
+ for reduce tasks for this job,
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setReduceSpeculativeExecution"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="speculativeExecution" type="boolean"/>
+ <doc>
+ <![CDATA[Turn speculative execution on or off for this job for reduce tasks.
+
+ @param speculativeExecution <code>true</code> if speculative execution
+ should be turned on for reduce tasks,
+ else <code>false</code>.]]>
+ </doc>
+ </method>
+ <method name="getNumMapTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of map tasks for this job.
+ Defaults to <code>1</code>.
+
+ @return the number of map tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumMapTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the number of map tasks for this job.
+
+ <p><i>Note</i>: This is only a <i>hint</i> to the framework. The actual
+ number of spawned map tasks depends on the number of {@link InputSplit}s
+ generated by the job's {@link InputFormat#getSplits(JobConf, int)}.
+
+ A custom {@link InputFormat} is typically used to accurately control
+ the number of map tasks for the job.</p>
+
+ <h4 id="NoOfMaps">How many maps?</h4>
+
+ <p>The number of maps is usually driven by the total size of the inputs
+ i.e. total number of blocks of the input files.</p>
+
+ <p>The right level of parallelism for maps seems to be around 10-100 maps
+ per node, although it has been set as high as 300 or so for very CPU-light
+ map tasks. Task setup takes a while, so it is best if the maps take at
+ least a minute to execute.</p>
+
+ <p>The default behavior of file-based {@link InputFormat}s is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of input files. However, the {@link FileSystem} blocksize of the
+ input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../mapred-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB,
+ you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is
+ used to set it even higher.</p>
+
+ @param n the number of map tasks for this job.
+ @see InputFormat#getSplits(JobConf, int)
+ @see FileInputFormat
+ @see FileSystem#getDefaultBlockSize()
+ @see FileStatus#getBlockSize()]]>
+ </doc>
+ </method>
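+ <!-- Worked example of the arithmetic above: 10 TB of input at a 128 MB block
+ size gives 10 * 1024 * 1024 / 128 = 81,920 splits, i.e. roughly 82,000 maps.
+ The call below is only a hint; the InputFormat's getSplits() has the final say.
+
+ job.setNumMapTasks(100000); // hint: honoured only if getSplits() produces that many
+ -->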
+ <method name="getNumReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of reduce tasks for this job. Defaults to
+ <code>1</code>.
+
+ @return the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="setNumReduceTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Set the requisite number of reduce tasks for this job.
+
+ <h4 id="NoOfReduces">How many reduces?</h4>
+
+ <p>The right number of reduces seems to be <code>0.95</code> or
+ <code>1.75</code> multiplied by (&lt;<i>no. of nodes</i>&gt; *
+ <a href="{@docRoot}/../mapred-default.html#mapred.tasktracker.reduce.tasks.maximum">
+ mapred.tasktracker.reduce.tasks.maximum</a>).
+ </p>
+
+ <p>With <code>0.95</code> all of the reduces can launch immediately and
+ start transferring map outputs as the maps finish. With <code>1.75</code>
+ the faster nodes will finish their first round of reduces and launch a
+ second wave of reduces, doing a much better job of load balancing.</p>
+
+ <p>Increasing the number of reduces increases the framework overhead, but
+ improves load balancing and lowers the cost of failures.</p>
+
+ <p>The scaling factors above are slightly less than whole numbers to
+ reserve a few reduce slots in the framework for speculative-tasks, failures
+ etc.</p>
+
+ <h4 id="ReducerNone">Reducer NONE</h4>
+
+ <p>It is legal to set the number of reduce-tasks to <code>zero</code>.</p>
+
+ <p>In this case the output of the map-tasks goes directly to the distributed
+ file-system, to the path set by
+ {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the
+ framework doesn't sort the map-outputs before writing them out to HDFS.</p>
+
+ @param n the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
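+ <!-- A sketch of the 0.95 rule above for a hypothetical 10-node cluster with
+ mapred.tasktracker.reduce.tasks.maximum set to 2: 0.95 * 10 * 2 = 19 reduces.
+
+ job.setNumReduceTasks(19); // single wave of reduces
+ // job.setNumReduceTasks(0); // map-only job: map output written straight to HDFS, unsorted
+ -->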
+ <method name="getMaxMapAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ map task, as specified by the <code>mapred.map.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ map task.
+
+ @param n the number of attempts per map task.]]>
+ </doc>
+ </method>
+ <method name="getMaxReduceAttempts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ reduce task, as specified by the <code>mapred.reduce.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+
+ @return the max number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ reduce task.
+
+ @param n the number of attempts per reduce task.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name. This is only used to identify the
+ job to the user.
+
+ @return the job's name, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified job name.
+
+ @param name the job's new name.]]>
+ </doc>
+ </method>
+ <method name="getSessionId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified session identifier. The default is the empty string.
+
+ The session identifier is used to tag metric data that is reported to some
+ performance metrics system via the org.apache.hadoop.metrics API. The
+ session identifier is intended, in particular, for use by Hadoop-On-Demand
+ (HOD) which allocates a virtual Hadoop cluster dynamically and transiently.
+ HOD will set the session identifier by modifying the mapred-site.xml file
+ before starting the cluster.
+
+ When not running under HOD, this identifier is expected to remain set to
+ the empty string.
+
+ @return the session identifier, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="setSessionId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="sessionId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the user-specified session identifier.
+
+ @param sessionId the new session id.]]>
+ </doc>
+ </method>
+ <method name="setMaxTaskFailuresPerTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="noFailures" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds <code>noFailures</code>, the
+ tasktracker is <i>blacklisted</i> for this job.
+
+ @param noFailures maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxTaskFailuresPerTracker" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Expert: Get the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds this, the tasktracker is
+ <i>blacklisted</i> for this job.
+
+ @return the maximum no. of failures of a given job per tasktracker.]]>
+ </doc>
+ </method>
+ <method name="getMaxMapTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of map tasks that can fail without
+ the job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed map-task results in
+ the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxMapTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Expert: Set the maximum percentage of map tasks that can fail without the
+ job being aborted.
+
+ Each map task is executed a minimum of {@link #getMaxMapAttempts} attempts
+ before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of map tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
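+ <!-- A sketch: tolerate up to 5% of map tasks failing (after exhausting their
+ attempts) without aborting the job; the default of zero aborts on any failure.
+
+ job.setMaxMapTaskFailuresPercent(5);
+ -->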
+ <method name="getMaxReduceTaskFailuresPercent" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the maximum percentage of reduce tasks that can fail without
+ the job being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed reduce-task results
+ in the job being declared as {@link JobStatus#FAILED}.
+
+ @return the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setMaxReduceTaskFailuresPercent"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="percent" type="int"/>
+ <doc>
+ <![CDATA[Set the maximum percentage of reduce tasks that can fail without the job
+ being aborted.
+
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()}
+ attempts before being declared as <i>failed</i>.
+
+ @param percent the maximum percentage of reduce tasks that can fail without
+ the job being aborted.]]>
+ </doc>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="prio" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Set {@link JobPriority} for this job.
+
+ @param prio the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link JobPriority} for this job.
+
+ @return the {@link JobPriority} for this job.]]>
+ </doc>
+ </method>
+ <method name="getProfileEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get whether the task profiling is enabled.
+ @return true if some tasks will be profiled]]>
+ </doc>
+ </method>
+ <method name="setProfileEnabled"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newValue" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the system should collect profiler information for some of
+ the tasks in this job. The information is stored in the user log
+ directory.
+ @param newValue true means it should be gathered]]>
+ </doc>
+ </method>
+ <method name="getProfileParams" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the profiler configuration arguments.
+
+ The default value for this property is
+ "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
+
+ @return the parameters to pass to the task child to configure profiling]]>
+ </doc>
+ </method>
+ <method name="setProfileParams"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the profiler configuration arguments. If the string contains a '%s' it
+ will be replaced with the name of the profiling output file when the task
+ runs.
+
+ This value is passed to the task child JVM on the command line.
+
+ @param value the configuration string]]>
+ </doc>
+ </method>
+ <method name="getProfileTaskRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <doc>
+ <![CDATA[Get the range of maps or reduces to profile.
+ @param isMap is the task a map?
+ @return the task ranges]]>
+ </doc>
+ </method>
+ <method name="setProfileTaskRange"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="isMap" type="boolean"/>
+ <param name="newValue" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the ranges of maps or reduces to profile. setProfileEnabled(true)
+ must also be called.
+ @param isMap whether the ranges apply to map tasks (true) or reduce tasks (false)
+ @param newValue a set of integer ranges of the task ids]]>
+ </doc>
+ </method>
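+ <!-- A sketch tying the profiling setters together: profile the first three map
+ tasks with the default hprof parameters. The range string follows the
+ IntegerRanges syntax returned by getProfileTaskRange.
+
+ job.setProfileEnabled(true);
+ job.setProfileTaskRange(true, "0-2"); // true = map tasks, task ids 0 through 2
+ -->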
+ <method name="setMapDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the map tasks fail.
+
+ <p>The debug script can aid debugging of failed map tasks. The script is
+ given the task's stdout, stderr, syslog, and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the map failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script needs to be symlinked. </p>
+
+ <p>Here is an example of how to submit a script
+ <p><blockquote><pre>
+ job.setMapDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param mDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getMapDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the map task's debug script.
+
+ @return the debug Script for the mapred job for failed map tasks.
+ @see #setMapDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="setReduceDebugScript"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rDbgScript" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the debug script to run when the reduce tasks fail.
+
+ <p>The debug script can aid debugging of failed reduce tasks. The script
+ is given the task's stdout, stderr, syslog, and jobconf files as arguments.</p>
+
+ <p>The debug command, run on the node where the reduce failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf
+ </pre></blockquote></p>
+
+ <p> The script file is distributed through {@link DistributedCache}
+ APIs. The script file needs to be symlinked. </p>
+
+ <p>Here is an example of how to submit a script
+ <p><blockquote><pre>
+ job.setReduceDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote></p>
+
+ @param rDbgScript the script name]]>
+ </doc>
+ </method>
+ <method name="getReduceDebugScript" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the reduce task's debug script.
+
+ @return the debug script for the mapred job for failed reduce tasks.
+ @see #setReduceDebugScript(String)]]>
+ </doc>
+ </method>
+ <method name="getJobEndNotificationURI" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ @return the job end notification uri, <code>null</code> if it hasn't
+ been set.
+ @see #setJobEndNotificationURI(String)]]>
+ </doc>
+ </method>
+ <method name="setJobEndNotificationURI"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="uri" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the uri to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ <p>The uri can contain 2 special parameters: <tt>$jobId</tt> and
+ <tt>$jobStatus</tt>. Those, if present, are replaced by the job's
+ identifier and completion-status respectively.</p>
+
+ <p>This is typically used by application-writers to implement chaining of
+ Map-Reduce jobs in an <i>asynchronous manner</i>.</p>
+
+ @param uri the job end notification uri
+ @see JobStatus
+ @see <a href="{@docRoot}/org/apache/hadoop/mapred/JobClient.html#JobCompletionAndChaining">Job Completion and Chaining</a>]]>
+ </doc>
+ </method>
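+ <!-- A sketch of the substitution described above; the host and path are
+ hypothetical. On completion the framework replaces $jobId and $jobStatus
+ before invoking the URI.
+
+ job.setJobEndNotificationURI("http://myhost:8080/notify?id=$jobId&status=$jobStatus");
+ -->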
+ <method name="getJobLocalDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job-specific shared directory for use as scratch space.
+
+ <p>
+ When a job starts, a shared directory is created at location
+ <code>
+ ${mapred.local.dir}/taskTracker/jobcache/$jobid/work/ </code>.
+ This directory is exposed to the users through
+ <code>job.local.dir </code>,
+ so the tasks can use this space
+ as scratch space and share files among themselves. </p>
+ This value is also available as a system property.
+
+ @return The localized job specific shared directory]]>
+ </doc>
+ </method>
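+ <!-- A sketch of reading the shared scratch directory from inside a task, via
+ either the configuration key or the system property mentioned above.
+
+ String scratch = job.get("job.local.dir"); // from the job configuration
+ String sameDir = System.getProperty("job.local.dir"); // same value as a system property
+ -->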
+ <method name="getMaxVirtualMemoryForTask" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The maximum amount of memory any task of this job will use. See
+ {@link #MAPRED_TASK_MAXVMEM_PROPERTY}
+
+ @return The maximum amount of memory any task of this job will use, in
+ bytes.
+ @see #setMaxVirtualMemoryForTask(long)]]>
+ </doc>
+ </method>
+ <method name="setMaxVirtualMemoryForTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="vmem" type="long"/>
+ <doc>
+ <![CDATA[Set the maximum amount of memory any task of this job can use. See
+ {@link #MAPRED_TASK_MAXVMEM_PROPERTY}
+
+ @param vmem Maximum amount of virtual memory in bytes any task of this job
+ can use.
+ @see #getMaxVirtualMemoryForTask()]]>
+ </doc>
+ </method>
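+ <!-- A sketch: cap each task of this job at 2 GB of virtual memory. Per the
+ contract above, the value is in bytes.
+
+ job.setMaxVirtualMemoryForTask(2L * 1024 * 1024 * 1024); // 2 GB in bytes
+ -->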
+ <method name="getMaxPhysicalMemoryForTask" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The maximum amount of physical memory any task of this job will use. See
+ {@link #MAPRED_TASK_MAXPMEM_PROPERTY}
+
+ @return The maximum amount of physical memory any task of this job will
+ use, in bytes.
+ @see #setMaxPhysicalMemoryForTask(long)]]>
+ </doc>
+ </method>
+ <method name="setMaxPhysicalMemoryForTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="pmem" type="long"/>
+ <doc>
+ <![CDATA[Set the maximum amount of physical memory any task of this job can use. See
+ {@link #MAPRED_TASK_MAXPMEM_PROPERTY}
+
+ @param pmem Maximum amount of physical memory in bytes any task of this job
+ can use.
+ @see #getMaxPhysicalMemoryForTask()]]>
+ </doc>
+ </method>
+ <method name="getQueueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the name of the queue to which this job is submitted.
+ Defaults to 'default'.
+
+ @return name of the queue]]>
+ </doc>
+ </method>
+ <method name="setQueueName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the name of the queue to which this job should be submitted.
+
+ @param queueName Name of the queue]]>
+ </doc>
+ </method>
+ <field name="DISABLED_MEMORY_LIMIT" type="long"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A value which, if set for memory-related configuration options,
+ indicates that the options are turned off.]]>
+ </doc>
+ </field>
+ <field name="DEFAULT_QUEUE_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Name of the queue to which jobs will be submitted, if no queue
+ name is mentioned.]]>
+ </doc>
+ </field>
+ <field name="MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Cluster-wide configuration to be set by the administrators that provides
+ the default amount of maximum virtual memory for a job's tasks. This has to be
+ set on both the JobTracker node for the sake of scheduling decisions and on
+ the TaskTracker nodes for the sake of memory management.
+
+ <p>
+
+ If a job doesn't specify its virtual memory requirement by setting
+ {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to {@link #DISABLED_MEMORY_LIMIT},
+ tasks are assured a memory limit set to this property. This property is
+ disabled by default. If it is not explicitly set to a valid value by the
+ administrators and a job doesn't specify its virtual memory
+ requirements, the job's tasks will not be assured anything and may be
+ killed by a TT that intends to control the total memory usage of the tasks
+ via memory management functionality.
+
+ <p>
+
+ This value should in general be less than the cluster-wide configuration
+ {@link #UPPER_LIMIT_ON_TASK_VMEM_PROPERTY}. If it is not, or if it is not
+ set, the TaskTracker's memory management may be disabled and a scheduler's
+ memory-based scheduling decisions will be affected. Please refer to the
+ documentation of the configured scheduler to see how this property is used.]]>
+ </doc>
+ </field>
+ <field name="MAPRED_TASK_MAXVMEM_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The maximum amount of memory any task of this job will use.
+
+ <p>
+
+ This value will be used by TaskTrackers for monitoring the memory usage of
+ tasks of this job. If a TaskTracker's memory management functionality is
+ enabled, each task of this job will be allowed to use a maximum virtual
+ memory specified by this property. If the task's memory usage goes over
+ this value, the task will be failed by the TT. If not set, the cluster-wide
+ configuration {@link #MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY} is used as the
+ default value for memory requirements. If this property, cascaded with
+ {@link #MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY}, resolves to -1, the job's
+ tasks will not be assured anything and may be killed by a TT that intends
+ to control the total memory usage of the tasks via memory management
+ functionality. If the memory management functionality is disabled on a TT,
+ this value is ignored.
+
+ <p>
+
+ This value should also not be more than the cluster-wide configuration
+ {@link #UPPER_LIMIT_ON_TASK_VMEM_PROPERTY} which has to be set by the site
+ administrators.
+
+ <p>
+
+ This value may be used by schedulers that support scheduling based on a job's
+ memory requirements. In general, a task of this job will be scheduled on a
+ TaskTracker only if the amount of virtual memory still unoccupied on the
+ TaskTracker is greater than or equal to this value. But different
+ schedulers can take different decisions. Please refer to the documentation
+ of the scheduler being configured to see if it does memory based scheduling
+ and if it does, how this property is used by that scheduler.
+
+ @see #setMaxVirtualMemoryForTask(long)
+ @see #getMaxVirtualMemoryForTask()]]>
+ </doc>
+ </field>
+ <field name="MAPRED_TASK_MAXPMEM_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The maximum amount of physical memory any task of a job will use.
+
+ <p>
+
+ This value may be used by schedulers that support scheduling based on a job's
+ memory requirements. In general, a task of this job will be scheduled on a
+ TaskTracker, only if the amount of physical memory still unoccupied on the
+ TaskTracker is greater than or equal to this value. But different
+ schedulers can take different decisions. Please refer to the documentation
+ of the scheduler being configured to see how it does memory based
+ scheduling and how this variable is used by that scheduler.
+
+ @see #setMaxPhysicalMemoryForTask(long)
+ @see #getMaxPhysicalMemoryForTask()]]>
+ </doc>
+ </field>
+ <field name="UPPER_LIMIT_ON_TASK_VMEM_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Cluster-wide configuration to be set by the site administrators that
+ provides an upper limit on the maximum virtual memory that can be specified
+ by a job. The job configuration {@link #MAPRED_TASK_MAXVMEM_PROPERTY} and
+ the cluster-wide configuration
+ {@link #MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY} should, by definition, be
+ less than this value. If the job configuration
+ {@link #MAPRED_TASK_MAXVMEM_PROPERTY} is more than this value,
+ depending on the scheduler being configured, the job may be rejected or the
+ job configuration may just be ignored.
+
+ <p>
+
+ If it is not set on a TaskTracker, TaskTracker's memory management will be
+ disabled.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A map/reduce job configuration.
+
+ <p><code>JobConf</code> is the primary interface for a user to describe a
+ map-reduce job to the Hadoop framework for execution. The framework tries to
+ faithfully execute the job as described by <code>JobConf</code>, however:
+ <ol>
+ <li>
+ Some configuration parameters might have been marked as
+ <a href="{@docRoot}/org/apache/hadoop/conf/Configuration.html#FinalParams">
+ final</a> by administrators and hence cannot be altered.
+ </li>
+ <li>
+ While some job parameters are straightforward to set
+ (e.g. {@link #setNumReduceTasks(int)}), some parameters interact subtly with
+ the rest of the framework and/or the job configuration and are relatively
+ more complex for the user to control finely (e.g. {@link #setNumMapTasks(int)}).
+ </li>
+ </ol></p>
+
+ <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner
+ (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and
+ {@link OutputFormat} implementations to be used etc.
+
+ <p>Optionally <code>JobConf</code> is used to specify other advanced facets
+ of the job such as the <code>Comparator</code>s to be used, files to be put in
+ the {@link DistributedCache}, whether or not intermediate and/or job outputs
+ are to be compressed (and how), and debuggability via user-provided scripts
+ ({@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)})
+ for post-processing task logs, the task's stdout, stderr, syslog,
+ etc.</p>
+
+ <p>Here is an example on how to configure a job via <code>JobConf</code>:</p>
+ <p><blockquote><pre>
+ // Create a new JobConf
+ JobConf job = new JobConf(new Configuration(), MyJob.class);
+
+ // Specify various job-specific parameters
+ job.setJobName("myjob");
+
+ FileInputFormat.setInputPaths(job, new Path("in"));
+ FileOutputFormat.setOutputPath(job, new Path("out"));
+
+ job.setMapperClass(MyJob.MyMapper.class);
+ job.setCombinerClass(MyJob.MyReducer.class);
+ job.setReducerClass(MyJob.MyReducer.class);
+
+ job.setInputFormat(SequenceFileInputFormat.class);
+ job.setOutputFormat(SequenceFileOutputFormat.class);
+ </pre></blockquote></p>
+
+ @see JobClient
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache
+ @deprecated Use {@link Configuration} instead]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobConf -->
+ <!-- start interface org.apache.hadoop.mapred.JobConfigurable -->
+ <interface name="JobConfigurable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Initializes a new instance from a {@link JobConf}.
+
+ @param job the configuration]]>
+ </doc>
+ </method>
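+ <!-- A sketch of a class picking up its settings through configure(); the
+ property name "myfilter.threshold" is a hypothetical example.
+
+ public class MyFilter implements JobConfigurable {
+   private int threshold;
+   public void configure(JobConf job) {
+     threshold = job.getInt("myfilter.threshold", 10); // read a job property
+   }
+ }
+ -->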
+ <doc>
+ <![CDATA[Something that may be configured with a {@link JobConf}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobConfigurable -->
+ <!-- start class org.apache.hadoop.mapred.JobContext -->
+ <class name="JobContext" extends="org.apache.hadoop.mapreduce.JobContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.JobContext} instead.">
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job configuration.
+
+ @return JobConf]]>
+ </doc>
+ </method>
+ <method name="getProgressible" return="org.apache.hadoop.util.Progressable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the progress mechanism for reporting progress.
+
+ @return progress mechanism]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[@deprecated Use {@link org.apache.hadoop.mapreduce.JobContext} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobContext -->
+ <!-- start class org.apache.hadoop.mapred.JobEndNotifier -->
+ <class name="JobEndNotifier" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobEndNotifier"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="startNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="stopNotifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="registerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ <method name="localRunnerNotification"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="status" type="org.apache.hadoop.mapred.JobStatus"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobEndNotifier -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory -->
+ <class name="JobHistory" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="init" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="hostname" type="java.lang.String"/>
+ <param name="jobTrackerStartTime" type="long"/>
+ <doc>
+ <![CDATA[Initialize JobHistory files.
+ @param conf JobConf of the job tracker.
+ @param hostname the jobtracker's hostname
+ @param jobTrackerStartTime the jobtracker's start time
+ @return true if initialized properly,
+ false otherwise]]>
+ </doc>
+ </method>
+ <method name="parseHistoryFromFS"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="java.lang.String"/>
+ <param name="l" type="org.apache.hadoop.mapred.JobHistory.Listener"/>
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Parses a history file and invokes Listener.handle() for
+ each line of history. It can be used for looking through history
+ files for specific items without having to keep the whole history in memory.
+ @param path path to history file
+ @param l Listener for history events
+ @param fs FileSystem where history file is present
+ @throws IOException]]>
+ </doc>
+ </method>
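+ <!-- A sketch of streaming through a history file without holding it all in
+ memory. The handle() callback shape shown is an assumption about the inner
+ JobHistory.Listener interface and should be checked against that class.
+
+ JobHistory.parseHistoryFromFS(path, new JobHistory.Listener() {
+   public void handle(JobHistory.RecordTypes recType,
+                      java.util.Map<JobHistory.Keys, String> values) throws IOException {
+     // inspect one parsed history line at a time, e.g. filter on recType
+   }
+ }, fs);
+ -->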
+ <method name="isDisableHistory" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the history disable status. By default history is enabled, so this
+ method returns false.
+ @return true if history logging is disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="setDisableHistory"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="disableHistory" type="boolean"/>
+ <doc>
+ <![CDATA[Enable/disable history logging. Default value is false, so history
+ is enabled by default.
+ @param disableHistory true if history should be disabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getTaskLogsUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="attempt" type="org.apache.hadoop.mapred.JobHistory.TaskAttempt"/>
+ <doc>
+ <![CDATA[Return the TaskLogsUrl of a particular TaskAttempt.
+
+ @param attempt the task attempt
+ @return the taskLogsUrl, or null if the http-port, tracker-name or
+ task-attempt-id is unavailable.]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JOB_NAME_TRIM_LENGTH" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Provides methods for writing to and reading from job history.
+ Job History works in append mode; JobHistory and its inner classes provide methods
+ to log job events.
+
+ JobHistory is split into multiple files; the format of each file is plain text, where each line
+ is of the format [type (key=value)*], where type identifies the type of the record.
+ Type maps to the UID of one of the inner classes of this class.
+
+ Job history is maintained in a master index which contains start/stop times of all jobs with
+ a few other job-level properties. Apart from this, each job's history is maintained in a separate history
+ file. The name of each job history file follows the format jobtrackerId_jobid.
+
+ For parsing the job history it supports a listener-based interface where each line is parsed
+ and passed to the listener. The listener can create an object model of the history or look for specific
+ events and discard the rest of the history.
+
+ CHANGE LOG :
+ Version 0 : The history has the following format :
+ TAG KEY1="VALUE1" KEY2="VALUE2" and so on.
+ TAG can be Job, Task, MapAttempt or ReduceAttempt.
+ Note that a '"' is the line delimiter.
+ Version 1 : Changes the line delimiter to '.'
+ Values are now escaped for unambiguous parsing.
+ Added the Meta tag to store version info.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <class name="JobHistory.HistoryCleaner" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobHistory.HistoryCleaner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Cleans up history data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Delete history files older than one month. Update the master index and remove all
+ jobs older than one month. Also, if a job tracker has had no jobs in the last month,
+ remove the reference to the job tracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.HistoryCleaner -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.JobInfo -->
+ <class name="JobHistory.JobInfo" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.JobInfo" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create new JobInfo]]>
+ </doc>
+ </constructor>
+ <method name="getAllTasks" return="java.util.Map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all map and reduce tasks, as a map from task id to Task.]]>
+ </doc>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Get the path of the locally stored job file
+ @param jobId id of the job
+ @return the path of the job file on the local file system]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFile" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the path of the job-history
+ log file.
+
+ @param logFile path of the job-history file
+ @return URL encoded path
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="encodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to encode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL encoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="decodeJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Helper function to decode the URL of the filename of the job-history
+ log file.
+
+ @param logFileName file name of the job-history file
+ @return URL decoded filename
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getUserName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the user name from the job conf]]>
+ </doc>
+ </method>
+ <method name="getJobHistoryLogLocation" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the job history file path given the history filename]]>
+ </doc>
+ </method>
+ <method name="getJobHistoryLogLocationForUser" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logFileName" type="java.lang.String"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the user job history file path]]>
+ </doc>
+ </method>
+ <method name="getJobHistoryFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="id" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Recover the job history filename from the history folder.
+ Uses the following pattern
+ $jt-hostname_[0-9]*_$job-id_$user-$job-name*
+ @param jobConf the job conf
+ @param id job id]]>
+ </doc>
+ </method>
+ <method name="recoverJobHistoryFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="logFilePath" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Selects one of the two files generated as a part of recovery.
+ The rule of thumb is to always select the older file.
+ This call makes sure that only one file is left in the end.
+ @param conf job conf
+ @param logFilePath Path of the log file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logSubmitted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="jobConfPath" type="java.lang.String"/>
+ <param name="submitTime" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Log job submitted event to history. Creates a new file in history
+ for the job. If history file creation fails, it disables history logging
+ for all subsequent events.
+ @param jobId job id assigned by job tracker.
+ @param jobConf job conf of the job
+ @param jobConfPath path to job conf xml file in HDFS.
+ @param submitTime time when job tracker received the job
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="logInited"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs launch time of job.
+
+ @param jobId job id, assigned by jobtracker.
+ @param startTime start time of job.
+ @param totalMaps total maps assigned by jobtracker.
+ @param totalReduces total reduces.]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link #logInited(JobID, long, int, int)} and
+ {@link #logStarted(JobID)}">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="startTime" type="long"/>
+ <param name="totalMaps" type="int"/>
+ <param name="totalReduces" type="int"/>
+ <doc>
+ <![CDATA[Logs the job as RUNNING.
+
+ @param jobId job id, assigned by jobtracker.
+ @param startTime start time of job.
+ @param totalMaps total maps assigned by jobtracker.
+ @param totalReduces total reduces.
+ @deprecated Use {@link #logInited(JobID, long, int, int)} and
+ {@link #logStarted(JobID)}]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+      <![CDATA[Logs the job as RUNNING.
+ @param jobId job id, assigned by jobtracker.]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="finishTime" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <param name="failedMaps" type="int"/>
+ <param name="failedReduces" type="int"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+      <![CDATA[Log job finished event. Closes the job file in history.
+ @param jobId job id, assigned by jobtracker.
+ @param finishTime finish time of job in ms.
+ @param finishedMaps number of maps successfully finished.
+ @param finishedReduces number of reduces successfully finished.
+ @param failedMaps number of failed map tasks.
+ @param failedReduces number of failed reduce tasks.
+ @param counters the counters from the job]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <doc>
+      <![CDATA[Logs job failed event. Closes the job history log file.
+ @param jobid job id
+ @param timestamp time when job failure was detected in ms.
+ @param finishedMaps number of finished map tasks.
+ @param finishedReduces number of finished reduce tasks.]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="timestamp" type="long"/>
+ <param name="finishedMaps" type="int"/>
+ <param name="finishedReduces" type="int"/>
+ <doc>
+      <![CDATA[Logs job killed event. Closes the job history log file.
+ @param jobid job id
+ @param timestamp time when the kill was issued in ms.
+ @param finishedMaps number of finished map tasks.
+ @param finishedReduces number of finished reduce tasks.]]>
+ </doc>
+ </method>
+ <method name="logJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="priority" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+      <![CDATA[Log job's priority.
+ @param jobid job id
+ @param priority the job's priority]]>
+ </doc>
+ </method>
+ <method name="logJobInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link #logJobInfo(JobID, long, long)} instead.">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="submitTime" type="long"/>
+ <param name="launchTime" type="long"/>
+ <param name="restartCount" type="int"/>
+ <doc>
+ <![CDATA[Log job's submit-time/launch-time
+ @param jobid job id
+ @param submitTime job's submit time
+ @param launchTime job's launch time
+ @param restartCount number of times the job got restarted
+ @deprecated Use {@link #logJobInfo(JobID, long, long)} instead.]]>
+ </doc>
+ </method>
+ <method name="logJobInfo"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="submitTime" type="long"/>
+ <param name="launchTime" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to job start, finish or failure.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.JobInfo -->
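+  <!-- A minimal sketch of the job-lifecycle logging sequence described by the
+       JobInfo methods above (all types from org.apache.hadoop.mapred). The
+       conf, conf path, task counts and timestamps below are illustrative
+       assumptions, not values taken from this API description.
+
+         JobConf conf = new JobConf();
+         JobID jobId = new JobID("200707121733", 3);  // hypothetical id
+         Counters counters = new Counters();          // empty, for the sketch
+         JobHistory.JobInfo.logSubmitted(jobId, conf, "/jobs/job.xml",
+             System.currentTimeMillis());
+         JobHistory.JobInfo.logInited(jobId, System.currentTimeMillis(), 10, 2);
+         JobHistory.JobInfo.logStarted(jobId);
+         // ... later, when the job completes:
+         JobHistory.JobInfo.logFinished(jobId, System.currentTimeMillis(),
+             10, 2, 0, 0, counters);
+  -->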
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Keys -->
+ <class name="JobHistory.Keys" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Keys[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Keys"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="JOBTRACKERID" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="START_TIME" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FINISH_TIME" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JOBID" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JOBNAME" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="USER" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JOBCONF" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SUBMIT_TIME" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LAUNCH_TIME" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TOTAL_MAPS" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TOTAL_REDUCES" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED_MAPS" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED_REDUCES" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FINISHED_MAPS" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FINISHED_REDUCES" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JOB_STATUS" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TASKID" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HOSTNAME" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TASK_TYPE" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ERROR" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TASK_ATTEMPT_ID" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TASK_STATUS" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COPY_PHASE" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SORT_PHASE" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="REDUCE_PHASE" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SHUFFLE_FINISHED" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SORT_FINISHED" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COUNTERS" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SPLITS" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="JOB_PRIORITY" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HTTP_PORT" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TRACKER_NAME" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STATE_STRING" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VERSION" type="org.apache.hadoop.mapred.JobHistory.Keys"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Job history files contain key="value" pairs, where keys belong to this enum.
+ It acts as a global namespace for all keys.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Keys -->
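+  <!-- A small sketch of this enum acting as the key namespace when reading a
+       record back. The typed map is an assumption based on the Listener
+       callback below, which passes the parsed key-value pairs of one line;
+       the values put into it here are hypothetical.
+
+         java.util.Map<JobHistory.Keys, String> record =
+             new java.util.EnumMap<JobHistory.Keys, String>(JobHistory.Keys.class);
+         record.put(JobHistory.Keys.USER, "alice");               // hypothetical
+         record.put(JobHistory.Keys.SUBMIT_TIME, "1194991200000"); // hypothetical
+         String user = record.get(JobHistory.Keys.USER);
+         long submitted = Long.parseLong(record.get(JobHistory.Keys.SUBMIT_TIME));
+  -->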
+ <!-- start interface org.apache.hadoop.mapred.JobHistory.Listener -->
+ <interface name="JobHistory.Listener" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="handle"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="recType" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"/>
+ <param name="values" type="java.util.Map"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Callback method for history parser.
+ @param recType type of record, which is the first entry in the line.
+ @param values a map of key-value pairs as they appear in history.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Callback interface for reading back log events from JobHistory. This interface
+ should be implemented and passed to JobHistory.parseHistory()]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.JobHistory.Listener -->
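+  <!-- A minimal Listener implementation, per the doc above; the raw Map
+       matches the handle() signature, and the println is only a stand-in
+       for real processing. The parse entry point is referenced as named in
+       the doc (JobHistory.parseHistory()), not independently verified.
+
+         class JobRecordPrinter implements JobHistory.Listener {
+           public void handle(JobHistory.RecordTypes recType,
+               java.util.Map values) throws java.io.IOException {
+             if (recType == JobHistory.RecordTypes.Job) {
+               System.out.println("job record: " + values);
+             }
+           }
+         }
+  -->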
+ <!-- start class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
+ <class name="JobHistory.MapAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.MapAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of this map task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time of task attempt as reported by task tracker.
+ @param hostName host name of the task attempt.
+ @deprecated Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="trackerName" type="java.lang.String"/>
+ <param name="httpPort" type="int"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of this map task attempt.
+
+ @param taskAttemptId task attempt id
+ @param startTime start time of task attempt as reported by task tracker.
+ @param trackerName name of the tracker executing the task attempt.
+ @param httpPort http port of the task tracker executing the task attempt
+ @param taskType Whether the attempt is cleanup or setup or map]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFinished(TaskAttemptID, long, String, String, String, Counters)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finish time of map task attempt.
+ @param taskAttemptId task attempt id
+ @param finishTime finish time
+ @param hostName host name
+ @deprecated Use
+ {@link #logFinished(TaskAttemptID, long, String, String, String, Counters)}]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="stateString" type="java.lang.String"/>
+ <param name="counter" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finish time of map task attempt.
+
+ @param taskAttemptId task attempt id
+ @param finishTime finish time
+ @param hostName host name
+ @param taskType Whether the attempt is cleanup or setup or map
+ @param stateString state string of the task attempt
+ @param counter counters of the task attempt]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt failed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @deprecated Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt failed event.
+
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @param taskType Whether the attempt is cleanup or setup or map]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt killed event.
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @deprecated Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log task attempt killed event.
+
+ @param taskAttemptId task attempt id
+ @param timestamp timestamp
+ @param hostName hostname of this task attempt.
+ @param error error message if any for this task attempt.
+ @param taskType Whether the attempt is cleanup or setup or map]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+ a Map Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.MapAttempt -->
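+  <!-- A sketch of logging one map attempt with the non-deprecated overloads
+       above. The attempt id string, tracker name, port and state string are
+       illustrative assumptions; Values.MAP.name() supplies the taskType
+       string, per the Values doc further below.
+
+         TaskAttemptID attempt =
+             TaskAttemptID.forName("attempt_200707121733_0003_m_000001_0");
+         JobHistory.MapAttempt.logStarted(attempt, System.currentTimeMillis(),
+             "tracker_host1:50060", 50060, JobHistory.Values.MAP.name());
+         // ... when the attempt succeeds:
+         JobHistory.MapAttempt.logFinished(attempt, System.currentTimeMillis(),
+             "host1", JobHistory.Values.MAP.name(), "map done", new Counters());
+  -->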
+ <!-- start class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
+ <class name="JobHistory.RecordTypes" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.RecordTypes[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="Jobtracker" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="Job" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="Task" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MapAttempt" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ReduceAttempt" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="Meta" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Record types are identifiers for each line of log in history files.
+ A record type appears as the first token in a single line of log.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.RecordTypes -->
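+  <!-- A sketch of dispatching on the record type inside a Listener callback,
+       since each type appears as the first token of a history line; the
+       cases shown are a subset and the bodies are placeholders.
+
+         public void handle(JobHistory.RecordTypes recType,
+             java.util.Map values) {
+           switch (recType) {
+             case Job:           /* job-level record */       break;
+             case MapAttempt:    /* map attempt record */     break;
+             case ReduceAttempt: /* reduce attempt record */  break;
+             default:            /* Jobtracker, Task, Meta */ break;
+           }
+         }
+  -->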
+ <!-- start class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
+ <class name="JobHistory.ReduceAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.ReduceAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of Reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param startTime start time
+ @param hostName host name
+ @deprecated Use
+ {@link #logStarted(TaskAttemptID, long, String, int, String)}]]>
+ </doc>
+ </method>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="startTime" type="long"/>
+ <param name="trackerName" type="java.lang.String"/>
+ <param name="httpPort" type="int"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log start time of Reduce task attempt.
+
+ @param taskAttemptId task attempt id
+ @param startTime start time
+ @param trackerName tracker name
+ @param httpPort the http port of the tracker executing the task attempt
+ @param taskType Whether the attempt is cleanup or setup or reduce]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFinished(TaskAttemptID, long, long, long, String, String, String, Counters)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log finished event of this task.
+ @param taskAttemptId task attempt id
+ @param shuffleFinished shuffle finish time
+ @param sortFinished sort finish time
+ @param finishTime finish time of task
+ @param hostName host name where task attempt executed
+ @deprecated Use
+ {@link #logFinished(TaskAttemptID, long, long, long, String, String, String, Counters)}]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shuffleFinished" type="long"/>
+ <param name="sortFinished" type="long"/>
+ <param name="finishTime" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="stateString" type="java.lang.String"/>
+ <param name="counter" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+ <![CDATA[Log finished event of this task.
+
+ @param taskAttemptId task attempt id
+ @param shuffleFinished shuffle finish time
+ @param sortFinished sort finish time
+ @param finishTime finish time of task
+ @param hostName host name where task attempt executed
+ @param taskType Whether the attempt is cleanup or setup or reduce
+ @param stateString the state string of the attempt
+ @param counter counters of the attempt]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log failed reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task failed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @deprecated Use
+ {@link #logFailed(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log failed reduce task attempt.
+
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when task failed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @param taskType Whether the attempt is cleanup or setup or reduce]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log killed reduce task attempt.
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when the task was killed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @deprecated Use
+ {@link #logKilled(TaskAttemptID, long, String, String, String)}]]>
+ </doc>
+ </method>
+ <method name="logKilled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskAttemptId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="timestamp" type="long"/>
+ <param name="hostName" type="java.lang.String"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="taskType" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Log killed reduce task attempt.
+
+ @param taskAttemptId task attempt id
+ @param timestamp time stamp when the task was killed
+ @param hostName host name of the task attempt.
+ @param error error message of the task.
+ @param taskType Whether the attempt is cleanup or setup or reduce]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Helper class for logging or reading back events related to start, finish or failure of
+ a Reduce Attempt on a node.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.ReduceAttempt -->
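+  <!-- A sketch of the non-deprecated logFinished overload above, which also
+       records the shuffle and sort finish times; the attempt id, timestamps
+       and state string are illustrative assumptions.
+
+         TaskAttemptID attempt =
+             TaskAttemptID.forName("attempt_200707121733_0003_r_000000_0");
+         long shuffleDone = System.currentTimeMillis();
+         long sortDone = shuffleDone + 1000;
+         long finish = sortDone + 5000;
+         JobHistory.ReduceAttempt.logFinished(attempt, shuffleDone, sortDone,
+             finish, "host1", JobHistory.Values.REDUCE.name(),
+             "reduce done", new Counters());
+  -->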
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Task -->
+ <class name="JobHistory.Task" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.Task"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="logStarted"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="startTime" type="long"/>
+ <param name="splitLocations" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Log start time of task (TIP).
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param startTime start time of tip.
+ @param splitLocations split locations of the task]]>
+ </doc>
+ </method>
+ <method name="logFinished"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="finishTime" type="long"/>
+ <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
+ <doc>
+      <![CDATA[Log finish time of task.
+ @param taskId task id
+ @param taskType MAP or REDUCE
+ @param finishTime finish time of task in ms
+ @param counters counters of the task]]>
+ </doc>
+ </method>
+ <method name="logUpdates"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="finishTime" type="long"/>
+ <doc>
+ <![CDATA[Update the finish time of task.
+ @param taskId task id
+ @param finishTime finish time of task in ms]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Log task failed event.
+ @param taskId task id
+ @param taskType MAP or REDUCE.
+ @param time timestamp when the task failure was detected.
+ @param error error message for failure.]]>
+ </doc>
+ </method>
+ <method name="logFailed"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskID"/>
+ <param name="taskType" type="java.lang.String"/>
+ <param name="time" type="long"/>
+ <param name="error" type="java.lang.String"/>
+ <param name="failedDueToAttempt" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+      <![CDATA[Log task failed event, recording the attempt that caused the failure.
+ @param failedDueToAttempt the attempt that caused the failure, if any]]>
+ </doc>
+ </method>
+ <method name="getTaskAttempts" return="java.util.Map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns all task attempts for this task, as a map from task attempt id to TaskAttempt.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Helper class for logging or reading back events related to Task's start, finish or failure.
+ All events logged by this class are logged in a separate file per job in
+ job tracker history. These events map to TIPs in jobtracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Task -->
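+  <!-- A sketch of TIP-level logging and of reading attempts back. The task
+       id and split locations are illustrative assumptions; a Task instance
+       is normally populated by the history parser, so the map returned by
+       getTaskAttempts() here would simply be empty.
+
+         TaskID tip = TaskID.forName("task_200707121733_0003_m_000001");
+         JobHistory.Task.logStarted(tip, JobHistory.Values.MAP.name(),
+             System.currentTimeMillis(), "host1,host2");
+         JobHistory.Task.logFinished(tip, JobHistory.Values.MAP.name(),
+             System.currentTimeMillis(), new Counters());
+
+         JobHistory.Task parsedTask = new JobHistory.Task();
+         java.util.Map attempts = parsedTask.getTaskAttempts(); // attempt id to TaskAttempt
+  -->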
+ <!-- start class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <class name="JobHistory.TaskAttempt" extends="org.apache.hadoop.mapred.JobHistory.Task"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobHistory.TaskAttempt"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Base class for Map and Reduce TaskAttempts.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.TaskAttempt -->
+ <!-- start class org.apache.hadoop.mapred.JobHistory.Values -->
+ <class name="JobHistory.Values" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobHistory.Values[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Values"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="SUCCESS" type="org.apache.hadoop.mapred.JobHistory.Values"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="org.apache.hadoop.mapred.JobHistory.Values"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="KILLED" type="org.apache.hadoop.mapred.JobHistory.Values"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP" type="org.apache.hadoop.mapred.JobHistory.Values"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="REDUCE" type="org.apache.hadoop.mapred.JobHistory.Values"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="CLEANUP" type="org.apache.hadoop.mapred.JobHistory.Values"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNNING" type="org.apache.hadoop.mapred.JobHistory.Values"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PREP" type="org.apache.hadoop.mapred.JobHistory.Values"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SETUP" type="org.apache.hadoop.mapred.JobHistory.Values"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+      <![CDATA[This enum contains some of the values commonly used by history log events.
+ Since values in history can only be strings, Values.name() is used in
+ most places in the history file.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobHistory.Values -->
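+  <!-- A sketch of comparing a history string against this enum via
+       Values.name(), as the doc above suggests; the record map is the same
+       assumption as in the Keys sketch earlier.
+
+         String status = record.get(JobHistory.Keys.JOB_STATUS);
+         if (JobHistory.Values.SUCCESS.name().equals(status)) {
+           System.out.println("job succeeded");
+         }
+  -->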
+ <!-- start class org.apache.hadoop.mapred.JobID -->
+ <class name="JobID" extends="org.apache.hadoop.mapreduce.JobID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobID" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a JobID object
+ @param jtIdentifier jobTracker identifier
+ @param id job number]]>
+ </doc>
+ </constructor>
+ <constructor name="JobID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="downgrade" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="old" type="org.apache.hadoop.mapreduce.JobID"/>
+ <doc>
+      <![CDATA[Downgrade a new JobID to an old one
+ @param old a new or old JobID
+ @return either old or a new JobID built to match old]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+      <![CDATA[Construct a JobID object from a given string
+ @return constructed JobId object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getJobIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <doc>
+      <![CDATA[Returns a regex pattern which matches job IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>any job</i>
+ run on the jobtracker started at <i>200707121733</i>, we would use:
+ <pre>
+      JobID.getJobIDsPattern("200707121733", null);
+ </pre>
+ which will return:
+ <pre> "job_200707121733_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @return a regex pattern matching JobIDs]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[JobID represents the immutable and unique identifier for
+ the job. JobID consists of two parts. The first part
+ represents the jobtracker identifier, so that the jobID to jobtracker
+ mapping is defined. For a cluster setup this string is the jobtracker
+ start time; for a local setting, it is "local".
+ The second part of the JobID is the job number. <br>
+ An example JobID is:
+ <code>job_200707121733_0003</code>, which represents the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse JobID strings, but rather
+ use appropriate constructors or {@link #forName(String)} method.
+
+ @see TaskID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobID -->
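+  <!-- A sketch of the documented entry points: parsing an id with forName,
+       building a match pattern, and downgrading a new-style id. The id
+       string reuses the example from the class doc above.
+
+         JobID id = JobID.forName("job_200707121733_0003");
+         String pattern = JobID.getJobIDsPattern("200707121733", null);
+         // pattern is "job_200707121733_[0-9]*"
+         org.apache.hadoop.mapreduce.JobID newStyle = id; // widening is implicit
+         JobID old = JobID.downgrade(newStyle);
+  -->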
+ <!-- start class org.apache.hadoop.mapred.JobPriority -->
+ <class name="JobPriority" extends="java.lang.Enum"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobPriority[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="VERY_HIGH" type="org.apache.hadoop.mapred.JobPriority"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="HIGH" type="org.apache.hadoop.mapred.JobPriority"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NORMAL" type="org.apache.hadoop.mapred.JobPriority"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LOW" type="org.apache.hadoop.mapred.JobPriority"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VERY_LOW" type="org.apache.hadoop.mapred.JobPriority"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Used to describe the priority of the running job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobPriority -->
+ <!-- start class org.apache.hadoop.mapred.JobProfile -->
+ <class name="JobProfile" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobProfile"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct an empty {@link JobProfile}.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, org.apache.hadoop.mapreduce.JobID, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Construct a {@link JobProfile} from the userid, jobid,
+ job config-file, job-details url and job name.
+
+ @param user userid of the person who submitted the job.
+ @param jobid id of the job.
+ @param jobFile job configuration file.
+ @param url link to the web-ui for details of the job.
+ @param name user-specified job name.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, org.apache.hadoop.mapreduce.JobID, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Construct a {@link JobProfile} from the userid, jobid,
+ job config-file, job-details url and job name.
+
+ @param user userid of the person who submitted the job.
+ @param jobid id of the job.
+ @param jobFile job configuration file.
+ @param url link to the web-ui for details of the job.
+ @param name user-specified job name.
+ @param queueName name of the queue to which the job is submitted]]>
+ </doc>
+ </constructor>
+ <constructor name="JobProfile" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="use JobProfile(String, JobID, String, String, String) instead">
+ <doc>
+ <![CDATA[@deprecated use JobProfile(String, JobID, String, String, String) instead]]>
+ </doc>
+ </constructor>
+ <method name="getUser" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user id.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job id.]]>
+ </doc>
+ </method>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID() instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID() instead]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configuration file for the job.]]>
+ </doc>
+ </method>
+ <method name="getURL" return="java.net.URL"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the link to the web-ui for details of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name.]]>
+ </doc>
+ </method>
+ <method name="getQueueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the queue to which the job is submitted.
+ @return name of the queue.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A JobProfile is a MapReduce primitive. Tracks a job,
+ whether living or dead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobProfile -->
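+  <!-- A sketch of constructing and reading a JobProfile; the user, URLs and
+       job name are hypothetical. The jobid parameter takes the new-style
+       org.apache.hadoop.mapreduce.JobID, which mapred.JobID extends.
+
+         JobID jobId = JobID.forName("job_200707121733_0003");
+         JobProfile profile = new JobProfile("alice", jobId,
+             "hdfs://namenode/jobs/job.xml",
+             "http://jobtracker:50030/jobdetails.jsp", "wordcount");
+         System.out.println(profile.getJobName() + " submitted by "
+             + profile.getUser());
+  -->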
+ <!-- start class org.apache.hadoop.mapred.JobQueueInfo -->
+ <class name="JobQueueInfo" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="JobQueueInfo"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor for Job Queue Info.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobQueueInfo" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a new JobQueueInfo object using the queue name and the
+ scheduling information passed.
+
+ @param queueName Name of the job queue
+ @param schedulingInfo Scheduling Information associated with the job
+ queue]]>
+ </doc>
+ </constructor>
+ <method name="setQueueName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queueName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the queue name of the JobQueueInfo
+
+ @param queueName Name of the job queue.]]>
+ </doc>
+ </method>
+ <method name="getQueueName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the queue name from JobQueueInfo
+
+ @return queue name]]>
+ </doc>
+ </method>
+ <method name="setSchedulingInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="schedulingInfo" type="java.lang.String"/>
+ <doc>
+      <![CDATA[Set the scheduling information associated with a particular job queue.
+
+ @param schedulingInfo the scheduling information for the queue]]>
+ </doc>
+ </method>
+ <method name="getSchedulingInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Gets the scheduling information associated with a particular job queue.
+ If nothing is set, this returns <b>"N/A"</b>.
+
+ @return scheduling information associated with the job queue]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Class that contains the information regarding the Job Queues which are
+ maintained by the Hadoop Map/Reduce framework.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobQueueInfo -->
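+  <!-- A short sketch of the accessors above; per the getSchedulingInfo doc,
+       "N/A" is returned until scheduling information has been set. The queue
+       name and info string are hypothetical.
+
+         JobQueueInfo queue = new JobQueueInfo();
+         queue.setQueueName("default");
+         System.out.println(queue.getSchedulingInfo()); // "N/A" until set
+         queue.setSchedulingInfo("capacity: 100%");     // hypothetical info string
+  -->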
+ <!-- start class org.apache.hadoop.mapred.JobStatus -->
+ <class name="JobStatus" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Cloneable"/>
+ <constructor name="JobStatus"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on cleanup
+ @param runState The current state of the job]]>
+ </doc>
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job]]>
+ </doc>
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, int, org.apache.hadoop.mapred.JobPriority"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job
+ @param jp Priority of the job.]]>
+ </doc>
+ </constructor>
+ <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, float, int, org.apache.hadoop.mapred.JobPriority"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.]]>
+ </doc>
+ </constructor>
+ <method name="getJobId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use getJobID instead">
+ <doc>
+ <![CDATA[@deprecated use getJobID instead]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The jobid of the Job]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in maps]]>
+ </doc>
+ </method>
+ <method name="cleanupProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in cleanup]]>
+ </doc>
+ </method>
+ <method name="setupProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in setup]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return Percentage of progress in reduce]]>
+ </doc>
+ </method>
+ <method name="getRunState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return running state of the job]]>
+ </doc>
+ </method>
+ <method name="setRunState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Change the current run state of the job.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return start time of the job]]>
+ </doc>
+ </method>
+ <method name="clone" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getUsername" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the username of the job]]>
+ </doc>
+ </method>
+ <method name="getSchedulingInfo" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the scheduling information associated with a particular job.
+ @return the scheduling information of the job]]>
+ </doc>
+ </method>
+ <method name="setSchedulingInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="schedulingInfo" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Used to set the scheduling information associated with a particular job.
+
+ @param schedulingInfo Scheduling information of the job]]>
+ </doc>
+ </method>
+ <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the priority of the job
+ @return job priority]]>
+ </doc>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jp" type="org.apache.hadoop.mapred.JobPriority"/>
+ <doc>
+ <![CDATA[Set the priority of the job, defaulting to NORMAL.
+ @param jp new job priority]]>
+ </doc>
+ </method>
+ <method name="isJobComplete" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the status is for a completed job.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SUCCEEDED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="PREP" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="KILLED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Describes the current status of a job. This is
+ not intended to be a comprehensive piece of data.
+ For that, look at JobProfile.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobStatus -->
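+ <!-- A minimal client-side sketch of how these status fields are typically
+      read; JobClient and its getAllJobs() call are assumptions drawn from
+      the surrounding API, not part of this class description.
+
+      import java.io.IOException;
+      import org.apache.hadoop.mapred.JobClient;
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.JobStatus;
+
+      public class StatusReport {
+        public static void report(JobConf conf) throws IOException {
+          JobClient client = new JobClient(conf);
+          for (JobStatus status : client.getAllJobs()) {
+            // getRunState() is one of PREP, RUNNING, SUCCEEDED, FAILED, KILLED
+            if (status.getRunState() == JobStatus.RUNNING) {
+              // progress values are assumed to be fractions in [0, 1]
+              System.out.printf("%s maps %.0f%% reduces %.0f%%%n",
+                  status.getJobID(), status.mapProgress() * 100,
+                  status.reduceProgress() * 100);
+            }
+          }
+        }
+      }
+ -->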
+ <!-- start class org.apache.hadoop.mapred.JobTracker -->
+ <class name="JobTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.InterTrackerProtocol"/>
+ <implements name="org.apache.hadoop.mapred.JobSubmissionProtocol"/>
+ <implements name="org.apache.hadoop.mapred.TaskTrackerManager"/>
+ <implements name="org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol"/>
+ <method name="startTracker" return="org.apache.hadoop.mapred.JobTracker"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker with given configuration.
+
+ The conf will be modified to reflect the actual ports on which
+ the JobTracker is up and running if the user passes the port as
+ <code>zero</code>.
+
+ @param conf configuration for the JobTracker.
+ @throws IOException]]>
+ </doc>
+ </method>
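+ <!-- A hedged sketch of the startTracker() contract documented above; the
+      "mapred.job.tracker" key and the localhost address are assumptions.
+
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.JobTracker;
+
+      public class LocalTracker {
+        public static void main(String[] args) throws Exception {
+          JobConf conf = new JobConf();
+          conf.set("mapred.job.tracker", "localhost:0"); // port zero: pick any free port
+          JobTracker tracker = JobTracker.startTracker(conf);
+          // conf has now been rewritten with the port actually bound
+          tracker.offerService(); // blocks, serving heartbeats forever
+        }
+      }
+ -->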
+ <method name="stopTracker"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hasRestarted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Whether the JobTracker has restarted]]>
+ </doc>
+ </method>
+ <method name="hasRecovered" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Whether the JobTracker has recovered upon restart]]>
+ </doc>
+ </method>
+ <method name="getRecoveryDuration" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[How long the jobtracker took to recover from restart.]]>
+ </doc>
+ </method>
+ <method name="getInstrumentationClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setInstrumentationClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="t" type="java.lang.Class"/>
+ </method>
+ <method name="getAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="offerService"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Run forever]]>
+ </doc>
+ </method>
+ <method name="getTotalSubmissions" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobTrackerMachine" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTrackerIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the unique identifier (i.e. timestamp) of this job tracker start.
+ @return a string with a unique identifier]]>
+ </doc>
+ </method>
+ <method name="getTrackerPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getInfoPort" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="runningJobs" return="java.util.Vector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getRunningJobs" return="java.util.List"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Version that is called from a timer thread, and therefore needs to be
+ careful to synchronize.]]>
+ </doc>
+ </method>
+ <method name="failedJobs" return="java.util.Vector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="completedJobs" return="java.util.Vector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="taskTrackers" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get all the task trackers in the cluster
+
+ @return {@link Collection} of {@link TaskTrackerStatus}]]>
+ </doc>
+ </method>
+ <method name="activeTaskTrackers" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the active task tracker statuses in the cluster
+
+ @return {@link Collection} of active {@link TaskTrackerStatus}]]>
+ </doc>
+ </method>
+ <method name="taskTrackerNames" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the active and blacklisted task tracker names in the cluster. The first
+ element in the returned list contains the list of active tracker names.
+ The second element in the returned list contains the list of blacklisted
+ tracker names.]]>
+ </doc>
+ </method>
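+ <!-- Sketch of consuming the two-element list documented above; "tracker"
+      is a hypothetical JobTracker reference.
+
+      List<List<String>> names = tracker.taskTrackerNames();
+      List<String> active      = names.get(0); // active tracker names
+      List<String> blacklisted = names.get(1); // blacklisted tracker names
+ -->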
+ <method name="blacklistedTaskTrackers" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the blacklisted task tracker statuses in the cluster
+
+ @return {@link Collection} of blacklisted {@link TaskTrackerStatus}]]>
+ </doc>
+ </method>
+ <method name="isBlacklisted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trackerID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Whether the tracker is blacklisted or not
+
+ @param trackerID ID of the task tracker to check
+
+ @return true if blacklisted, false otherwise]]>
+ </doc>
+ </method>
+ <method name="getTaskTracker" return="org.apache.hadoop.mapred.TaskTrackerStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="trackerID" type="java.lang.String"/>
+ </method>
+ <method name="resolveAndAddToTopology" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="getNodesAtMaxLevel" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns a collection of nodes at the max level]]>
+ </doc>
+ </method>
+ <method name="getParentNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="node" type="org.apache.hadoop.net.Node"/>
+ <param name="level" type="int"/>
+ </method>
+ <method name="getNode" return="org.apache.hadoop.net.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Return the Node in the network topology that corresponds to the hostname]]>
+ </doc>
+ </method>
+ <method name="getNumTaskCacheLevels" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumResolvedTaskTrackers" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNumberOfUniqueHosts" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="addJobInProgressListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="listener" type="org.apache.hadoop.mapred.JobInProgressListener"/>
+ </method>
+ <method name="removeJobInProgressListener"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="listener" type="org.apache.hadoop.mapred.JobInProgressListener"/>
+ </method>
+ <method name="getQueueManager" return="org.apache.hadoop.mapred.QueueManager"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the {@link QueueManager} associated with the JobTracker.]]>
+ </doc>
+ </method>
+ <method name="getBuildVersion" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="heartbeat" return="org.apache.hadoop.mapred.HeartbeatResponse"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskTrackerStatus"/>
+ <param name="restarted" type="boolean"/>
+ <param name="initialContact" type="boolean"/>
+ <param name="acceptNewTasks" type="boolean"/>
+ <param name="responseId" type="short"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The periodic heartbeat mechanism between the {@link TaskTracker} and
+ the {@link JobTracker}.
+
+ The {@link JobTracker} processes the status information sent by the
+ {@link TaskTracker} and responds with instructions to start/stop
+ tasks or jobs, and also 'reset' instructions during contingencies.]]>
+ </doc>
+ </method>
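+ <!-- Rough shape of a single heartbeat exchange as described above; the
+      local variables are hypothetical, and getActions() on the response is
+      an assumption about HeartbeatResponse.
+
+      HeartbeatResponse response = tracker.heartbeat(
+          trackerStatus, // TaskTrackerStatus snapshot from the TaskTracker
+          false,         // restarted
+          false,         // initialContact
+          true,          // acceptNewTasks
+          responseId);   // short, echoing the last response id seen
+      TaskTrackerAction[] actions = response.getActions(); // launch/kill/etc.
+ -->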
+ <method name="getNextHeartbeatInterval" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Calculates next heartbeat interval using cluster size.
+ Heartbeat interval is incremented by 1 second for every 50 nodes.
+ @return next heartbeat interval.]]>
+ </doc>
+ </method>
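+ <!-- Worked example of the scaling rule above. The one-second-per-50-nodes
+      increment is from the javadoc; the 3-second floor is an assumption.
+
+      int clusterSize = 250;                               // task trackers
+      int steps = (int) Math.ceil(clusterSize / 50.0);     // 250 nodes: 5 steps
+      int intervalMillis = Math.max(3000, steps * 1000);   // 5000 ms
+ -->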
+ <method name="getFilesystemName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Grab the local fs name]]>
+ </doc>
+ </method>
+ <method name="reportTaskTrackerError"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTracker" type="java.lang.String"/>
+ <param name="errorClass" type="java.lang.String"/>
+ <param name="errorMessage" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNewJobId" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Allocates a new JobID.]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[JobTracker.submitJob() kicks off a new job.
+
+ Create a 'JobInProgress' object, which contains both JobProfile
+ and JobStatus. Those two sub-objects are sometimes shipped outside
+ of the JobTracker. But JobInProgress adds info that's useful for
+ the JobTracker alone.]]>
+ </doc>
+ </method>
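+ <!-- Minimal sketch of the submission handshake visible in this class;
+      staging job.xml and the job jar under the system directory is elided.
+
+      JobID id = tracker.getNewJobId();
+      // ... client stages the job files under getSystemDir()/id here ...
+      JobStatus status = tracker.submitJob(id);
+ -->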
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getClusterStatus(boolean)}">
+ <doc>
+ <![CDATA[@deprecated use {@link #getClusterStatus(boolean)}]]>
+ </doc>
+ </method>
+ <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="detailed" type="boolean"/>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="priority" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the priority of a job
+ @param jobid id of the job
+ @param priority new priority of the job]]>
+ </doc>
+ </method>
+ <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getCleanupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getSetupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxEvents" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the diagnostics for a given task
+ @param taskId the id of the task
+ @return an array of the diagnostic messages]]>
+ </doc>
+ </method>
+ <method name="getTip" return="org.apache.hadoop.mapred.TaskInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tipid" type="org.apache.hadoop.mapred.TaskID"/>
+ <doc>
+ <![CDATA[Returns specified TaskInProgress, or null.]]>
+ </doc>
+ </method>
+ <method name="killTask" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Mark a Task to be killed]]>
+ </doc>
+ </method>
+ <method name="getAssignedTracker" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Get tracker name for a given task id.
+ @param taskId the id of the task
+ @return The name of the task tracker]]>
+ </doc>
+ </method>
+ <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getSystemDir" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir()]]>
+ </doc>
+ </method>
+ <method name="getJob" return="org.apache.hadoop.mapred.JobInProgress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+ </method>
+ <method name="getLocalJobFilePath" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Get the localized job file path on the job tracker's local file system
+ @param jobId id of the job
+ @return the path of the job conf file on the local file system]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start the JobTracker process. This is used only for debugging. As a rule,
+ JobTracker should be run as part of the DFS Namenode process.]]>
+ </doc>
+ </method>
+ <method name="getQueues" return="org.apache.hadoop.mapred.JobQueueInfo[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getQueueInfo" return="org.apache.hadoop.mapred.JobQueueInfo"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queue" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getJobsFromQueue" return="org.apache.hadoop.mapred.JobStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="queue" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="refreshServiceAcl"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[JobTracker is the central location for submitting and
+ tracking MR jobs in a network environment.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker -->
+ <!-- start class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <class name="JobTracker.IllegalStateException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobTracker.IllegalStateException" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[A client tried to submit a job before the Job Tracker was ready.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.IllegalStateException -->
+ <!-- start class org.apache.hadoop.mapred.JobTracker.State -->
+ <class name="JobTracker.State" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.JobTracker.State[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.JobTracker.State"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="INITIALIZING" type="org.apache.hadoop.mapred.JobTracker.State"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNNING" type="org.apache.hadoop.mapred.JobTracker.State"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.JobTracker.State -->
+ <!-- start class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+ <class name="KeyValueLineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader"/>
+ <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="findSeparator" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="utf" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="length" type="int"/>
+ <param name="sep" type="byte"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the key/value pair in a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class treats a line in the input as a key/value pair separated by a
+ separator character. The separator can be specified in the config file
+ under the attribute name key.value.separator.in.input.line. The default
+ separator is the tab character ('\t').]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
+ <class name="KeyValueTextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="KeyValueTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return are used to signal end of line. Each line
+ is divided into key and value parts by a separator byte. If no such byte
+ exists, the key will be the entire line and value will be empty.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
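+ <!-- A small configuration sketch for the separator behaviour described
+      above; the comma separator is an arbitrary example value.
+
+      JobConf conf = new JobConf();
+      conf.setInputFormat(KeyValueTextInputFormat.class);
+      conf.set("key.value.separator.in.input.line", ","); // default is '\t'
+      // the line "fruit,apple" now yields key Text("fruit"), value Text("apple")
+ -->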
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader -->
+ <class name="LineRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use
+ {@link org.apache.hadoop.mapreduce.lib.input.LineRecordReader} instead.">
+ <implements name="org.apache.hadoop.mapred.RecordReader"/>
+ <constructor name="LineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="LineRecordReader" type="java.io.InputStream, long, long, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.LongWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.LongWritable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the progress within the split]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Treats keys as offset in file and value as line.
+ @deprecated Use
+ {@link org.apache.hadoop.mapreduce.lib.input.LineRecordReader} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
+ <class name="LineRecordReader.LineReader" extends="org.apache.hadoop.util.LineReader"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.util.LineReader} instead.">
+ <constructor name="LineRecordReader.LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <doc>
+ <![CDATA[A class that provides a line reader from an input stream.
+ @deprecated Use {@link org.apache.hadoop.util.LineReader} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
+ <!-- start class org.apache.hadoop.mapred.MapFileOutputFormat -->
+ <class name="MapFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.MapFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getEntry" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="readers" type="org.apache.hadoop.io.MapFile.Reader[]"/>
+ <param name="partitioner" type="org.apache.hadoop.mapred.Partitioner"/>
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get an entry from output generated by this class.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link MapFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapFileOutputFormat -->
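+ <!-- Hedged lookup sketch using the two static helpers above; fs, dir and
+      conf are hypothetical, and HashPartitioner stands in for whatever
+      partitioner the producing job actually used.
+
+      import org.apache.hadoop.io.MapFile;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.mapred.MapFileOutputFormat;
+      import org.apache.hadoop.mapred.lib.HashPartitioner;
+
+      MapFile.Reader[] readers = MapFileOutputFormat.getReaders(fs, dir, conf);
+      Text value = new Text();
+      MapFileOutputFormat.getEntry(readers,
+          new HashPartitioner<Text, Text>(), new Text("someKey"), value);
+ -->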
+ <!-- start interface org.apache.hadoop.mapred.Mapper -->
+ <interface name="Mapper" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.Mapper} instead.">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Maps a single input key/value pair into an intermediate key/value pair.
+
+ <p>Output pairs need not be of the same types as input pairs. A given
+ input pair may map to zero or many output pairs. Output pairs are
+ collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes an insignificant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has
+ timed out and kill that task. The other way of avoiding this is to set
+ <a href="{@docRoot}/../mapred-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the input key.
+ @param value the input value.
+ @param output collects mapped keys and values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Maps input key/value pairs to a set of intermediate key/value pairs.
+
+ <p>Maps are the individual tasks which transform input records into
+ intermediate records. The transformed intermediate records need not be of
+ the same type as the input records. A given input pair may map to zero or
+ many output pairs.</p>
+
+ <p>The Hadoop Map-Reduce framework spawns one map task for each
+ {@link InputSplit} generated by the {@link InputFormat} for the job.
+ <code>Mapper</code> implementations can access the {@link JobConf} for the
+ job via the {@link JobConfigurable#configure(JobConf)} and initialize
+ themselves. Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p>The framework then calls
+ {@link #map(Object, Object, OutputCollector, Reporter)}
+ for each key/value pair in the <code>InputSplit</code> for that task.</p>
+
+ <p>All intermediate values associated with a given output key are
+ subsequently grouped by the framework, and passed to a {@link Reducer} to
+ determine the final output. Users can control the grouping by specifying
+ a <code>Comparator</code> via
+ {@link JobConf#setOutputKeyComparatorClass(Class)}.</p>
+
+ <p>The grouped <code>Mapper</code> outputs are partitioned per
+ <code>Reducer</code>. Users can control which keys (and hence records) go to
+ which <code>Reducer</code> by implementing a custom {@link Partitioner}.
+
+ <p>Users can optionally specify a <code>combiner</code>, via
+ {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the
+ intermediate outputs, which helps to cut down the amount of data transferred
+ from the <code>Mapper</code> to the <code>Reducer</code>.
+
+ <p>The intermediate, grouped outputs are always stored in
+ {@link SequenceFile}s. Applications can specify if and how the intermediate
+ outputs are to be compressed and which {@link CompressionCodec}s are to be
+ used via the <code>JobConf</code>.</p>
+
+ <p>If the job has
+ <a href="{@docRoot}/org/apache/hadoop/mapred/JobConf.html#ReducerNone">zero
+ reduces</a> then the output of the <code>Mapper</code> is directly written
+ to the {@link FileSystem} without grouping by keys.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyMapper&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Mapper&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String mapTaskId;
+ private String inputFile;
+ private int noRecords = 0;
+
+ public void configure(JobConf job) {
+ mapTaskId = job.get("mapred.task.id");
+ inputFile = job.get("map.input.file");
+ }
+
+ public void map(K key, V val,
+ OutputCollector&lt;K, V&gt; output, Reporter reporter)
+ throws IOException {
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ // reporter.progress();
+
+ // Process some more
+ // ...
+ // ...
+
+ // Increment the no. of &lt;key, value&gt; pairs processed
+ ++noRecords;
+
+ // Increment counters
+ reporter.incrCounter(NUM_RECORDS, 1);
+
+ // Every 100 records update application-level status
+ if ((noRecords%100) == 0) {
+ reporter.setStatus(mapTaskId + " processed " + noRecords +
+ " from input-file: " + inputFile);
+ }
+
+ // Output the result
+ output.collect(key, val);
+ }
+ }
+ </pre></blockquote></p>
+
+ <p>Applications may write a custom {@link MapRunnable} to exert greater
+ control on map processing e.g. multi-threaded <code>Mapper</code>s etc.</p>
+
+ @see JobConf
+ @see InputFormat
+ @see Partitioner
+ @see Reducer
+ @see MapReduceBase
+ @see MapRunnable
+ @see SequenceFile
+ @deprecated Use {@link org.apache.hadoop.mapreduce.Mapper} instead.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Mapper -->
+ <!-- start class org.apache.hadoop.mapred.MapReduceBase -->
+ <class name="MapReduceBase" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="MapReduceBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Default implementation that does nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for {@link Mapper} and {@link Reducer} implementations.
+
+ <p>Provides default no-op implementations for a few methods; most non-trivial
+ applications need to override some of them.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapReduceBase -->
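+ <!-- Typical use of this base class, echoing the Mapper example above:
+      only map() is overridden, while configure() and close() stay no-ops.
+
+      import java.io.IOException;
+      import org.apache.hadoop.io.LongWritable;
+      import org.apache.hadoop.io.Text;
+      import org.apache.hadoop.mapred.*;
+
+      public class PassThroughMapper extends MapReduceBase
+          implements Mapper<LongWritable, Text, LongWritable, Text> {
+        public void map(LongWritable key, Text value,
+            OutputCollector<LongWritable, Text> output, Reporter reporter)
+            throws IOException {
+          output.collect(key, value); // identity map
+        }
+      }
+ -->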
+ <!-- start class org.apache.hadoop.mapred.MapReducePolicyProvider -->
+ <class name="MapReducePolicyProvider" extends="org.apache.hadoop.security.authorize.PolicyProvider"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapReducePolicyProvider"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getServices" return="org.apache.hadoop.security.authorize.Service[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[{@link PolicyProvider} for Map-Reduce protocols.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapReducePolicyProvider -->
+ <!-- start interface org.apache.hadoop.mapred.MapRunnable -->
+ <interface name="MapRunnable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.Mapper} instead.">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start mapping input <tt>&lt;key, value&gt;</tt> pairs.
+
+ <p>Mapping of input records to output records is complete when this method
+ returns.</p>
+
+ @param input the {@link RecordReader} to read the input records.
+ @param output the {@link OutputCollector} to collect the output records.
+ @param reporter {@link Reporter} to report progress, status-updates etc.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Expert: Generic interface for {@link Mapper}s.
+
+ <p>Custom implementations of <code>MapRunnable</code> can exert greater
+ control on map processing e.g. multi-threaded, asynchronous mappers etc.</p>
+
+ @see Mapper
+ @deprecated Use {@link org.apache.hadoop.mapreduce.Mapper} instead.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.MapRunnable -->
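+ <!-- Sketch of a custom MapRunnable; it mirrors what the default MapRunner
+      (below) does, so treat it as illustrative rather than canonical.
+
+      import java.io.IOException;
+      import org.apache.hadoop.mapred.*;
+      import org.apache.hadoop.util.ReflectionUtils;
+
+      public class SequentialRunner<K1, V1, K2, V2>
+          implements MapRunnable<K1, V1, K2, V2> {
+        private Mapper<K1, V1, K2, V2> mapper;
+
+        @SuppressWarnings("unchecked")
+        public void configure(JobConf job) {
+          this.mapper = (Mapper<K1, V1, K2, V2>)
+              ReflectionUtils.newInstance(job.getMapperClass(), job);
+        }
+
+        public void run(RecordReader<K1, V1> input,
+            OutputCollector<K2, V2> output, Reporter reporter)
+            throws IOException {
+          K1 key = input.createKey();
+          V1 value = input.createValue();
+          while (input.next(key, value)) {
+            mapper.map(key, value, output, reporter);
+          }
+          mapper.close();
+        }
+      }
+ -->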
+ <!-- start class org.apache.hadoop.mapred.MapRunner -->
+ <class name="MapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable"/>
+ <constructor name="MapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getMapper" return="org.apache.hadoop.mapred.Mapper"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Default {@link MapRunnable} implementation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MapRunner -->
+ <!-- start class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <class name="MultiFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapred.lib.CombineFileInputFormat} instead">
+ <constructor name="MultiFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An abstract {@link InputFormat} that returns {@link MultiFileSplit}'s
+ in {@link #getSplits(JobConf, int)} method. Splits are constructed from
+ the files under the input paths. Each split returned has <i>nearly</i>
+ equal content length. <br>
+ Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)}
+ to construct <code>RecordReader</code>'s for <code>MultiFileSplit</code>'s.
+ @see MultiFileSplit
+ @deprecated Use {@link org.apache.hadoop.mapred.lib.CombineFileInputFormat} instead]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.MultiFileSplit -->
+ <class name="MultiFileSplit" extends="org.apache.hadoop.mapred.lib.CombineFileSplit"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapred.lib.CombineFileSplit} instead">
+ <constructor name="MultiFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A sub-collection of input files. Unlike {@link FileSplit}, MultiFileSplit
+ class does not represent a split of a file, but a split of input files
+ into smaller sets. The atomic unit of split is a file. <br>
+ MultiFileSplit can be used to implement {@link RecordReader}'s that
+ read one record per file.
+ @see FileSplit
+ @see MultiFileInputFormat
+ @deprecated Use {@link org.apache.hadoop.mapred.lib.CombineFileSplit} instead]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.MultiFileSplit -->
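+ <!-- Subclassing sketch for the contract described above;
+      MyMultiFileRecordReader is hypothetical and must be supplied by the
+      application (typically reading one record per file in the split).
+
+      public class MyMultiFileInputFormat extends MultiFileInputFormat<Text, Text> {
+        public RecordReader<Text, Text> getRecordReader(InputSplit split,
+            JobConf job, Reporter reporter) throws IOException {
+          return new MyMultiFileRecordReader(job, (MultiFileSplit) split); // hypothetical
+        }
+      }
+ -->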
+ <!-- start interface org.apache.hadoop.mapred.OutputCollector -->
+ <interface name="OutputCollector" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="collect"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Adds a key/value pair to the output.
+
+ @param key the key to collect.
+ @param value the value to collect.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Collects the <code>&lt;key, value&gt;</code> pairs output by {@link Mapper}s
+ and {@link Reducer}s.
+
+ <p><code>OutputCollector</code> is the generalization of the facility
+ provided by the Map-Reduce framework to collect data output by either the
+ <code>Mapper</code> or the <code>Reducer</code> i.e. intermediate outputs
+ or the output of the job.</p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputCollector -->
+ <!-- start class org.apache.hadoop.mapred.OutputCommitter -->
+ <class name="OutputCommitter" extends="org.apache.hadoop.mapreduce.OutputCommitter"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.OutputCommitter} instead.">
+ <constructor name="OutputCommitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setupJob"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For the framework to set up the job output during initialization.
+
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException if temporary output could not be created]]>
+ </doc>
+ </method>
+ <method name="cleanupJob"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For cleaning up the job's output after job completion
+
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setupTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets up output for the task.
+
+ @param taskContext Context of the task whose output is being written.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="needsTaskCommit" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check whether the task needs a commit.
+
+ @param taskContext
+ @return true if the task needs to commit its output, false otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="commitTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[To promote the task's temporary output to the final output location.
+
+ The task's output is moved to the job's output directory.
+
+ @param taskContext Context of the task whose output is being written.
+ @throws IOException if the commit is not successful]]>
+ </doc>
+ </method>
+ <method name="abortTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Discard the task output
+
+ @param taskContext
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setupJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old APIs and this
+ is a bridge between the two.]]>
+ </doc>
+ </method>
+ <method name="cleanupJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old APIs and this
+ is a bridge between the two.]]>
+ </doc>
+ </method>
+ <method name="setupTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old APIs and this
+ is a bridge between the two.]]>
+ </doc>
+ </method>
+ <method name="needsTaskCommit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old APIs and this
+ is a bridge between the two.]]>
+ </doc>
+ </method>
+ <method name="commitTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old APIs and this
+ is a bridge between the two.]]>
+ </doc>
+ </method>
+ <method name="abortTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old APIs and this
+ is a bridge between the two.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputCommitter</code> describes the commit of task output for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputCommitter</code> of
+ the job to:</p>
+ <ol>
+ <li>
+ Set up the job during initialization. For example, create the temporary
+ output directory for the job during job initialization.
+ </li>
+ <li>
+ Clean up the job after job completion. For example, remove the
+ temporary output directory after job completion.
+ </li>
+ <li>
+ Set up the task's temporary output.
+ </li>
+ <li>
+ Check whether a task needs a commit. This is to avoid the commit
+ procedure if a task does not need one.
+ </li>
+ <li>
+ Commit the task output.
+ </li>
+ <li>
+ Discard the task output (on abort).
+ </li>
+ </ol>
+
+ @see FileOutputCommitter
+ @see JobContext
+ @see TaskAttemptContext
+ @deprecated Use {@link org.apache.hadoop.mapreduce.OutputCommitter} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputCommitter -->
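+ <!-- Illustrative sketch (not part of the generated API record): rather than
+      implement every abstract method above, commit behaviour is usually customized
+      by extending FileOutputCommitter, the standard implementation. The subclass
+      name AuditingCommitter and the audit hook are hypothetical.
+
+      import java.io.IOException;
+      import org.apache.hadoop.mapred.FileOutputCommitter;
+      import org.apache.hadoop.mapred.TaskAttemptContext;
+
+      public class AuditingCommitter extends FileOutputCommitter {
+        @Override
+        public void commitTask(TaskAttemptContext context) throws IOException {
+          // promote the task's temporary output to the final location first
+          super.commitTask(context);
+          // then a hypothetical audit hook, e.g. logging the attempt id
+          System.err.println("committed " + context.getTaskAttemptID());
+        }
+      }
+ -->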
+ <!-- start interface org.apache.hadoop.mapred.OutputFormat -->
+ <interface name="OutputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.OutputFormat} instead.">
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordWriter} for the given job.
+
+ @param ignored
+ @param job configuration for the job whose output is being written.
+ @param name the unique name for this part of the output.
+ @param progress mechanism for reporting progress while writing to file.
+ @return a {@link RecordWriter} to write the output for the job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check for validity of the output-specification for the job.
+
+ <p>This validates the output specification for the job when the job is
+ submitted. Typically it checks that the output does not already exist,
+ throwing an exception when it does, so that existing output is not
+ overwritten.</p>
+
+ @param ignored
+ @param job job configuration.
+ @throws IOException when output should not be attempted]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputFormat</code> describes the output-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the output-specification of the job, e.g. check that the
+ output directory doesn't already exist.
+ </li>
+ <li>
+ Provide the {@link RecordWriter} implementation to be used to write out
+ the output files of the job. Output files are stored in a
+ {@link FileSystem}.
+ </li>
+ </ol>
+
+ @see RecordWriter
+ @see JobConf
+ @deprecated Use {@link org.apache.hadoop.mapreduce.OutputFormat} instead.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.OutputFormat -->
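+ <!-- Illustrative sketch (not part of the generated API record): a minimal
+      OutputFormat that discards everything, mirroring the shape of the library's
+      NullOutputFormat. The class name DiscardingOutputFormat is hypothetical; it
+      shows the two methods an implementation must provide.
+
+      import org.apache.hadoop.fs.FileSystem;
+      import org.apache.hadoop.mapred.*;
+      import org.apache.hadoop.util.Progressable;
+
+      public class DiscardingOutputFormat<K, V> implements OutputFormat<K, V> {
+        public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job,
+                                                  String name, Progressable progress) {
+          return new RecordWriter<K, V>() {
+            public void write(K key, V value) { /* drop the pair */ }
+            public void close(Reporter reporter) { }
+          };
+        }
+        public void checkOutputSpecs(FileSystem ignored, JobConf job) {
+          // nothing to validate: there is no output location to clobber
+        }
+      }
+ -->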
+ <!-- start class org.apache.hadoop.mapred.OutputLogFilter -->
+ <class name="OutputLogFilter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.fs.PathFilter"/>
+ <constructor name="OutputLogFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <doc>
+ <![CDATA[This class filters log files from the given directory.
+ It doesn't accept paths containing _logs.
+ It can be used to list the paths of an output directory as follows:
+ Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
+ new OutputLogFilter()));]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.OutputLogFilter -->
+ <!-- start interface org.apache.hadoop.mapred.Partitioner -->
+ <interface name="Partitioner" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.Partitioner} instead.">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="numPartitions" type="int"/>
+ <doc>
+ <![CDATA[Get the partition number for a given key (hence record) given the total
+ number of partitions, i.e. the number of reduce-tasks for the job.
+
+ <p>Typically a hash function on all or a subset of the key.</p>
+
+ @param key the key to be partitioned.
+ @param value the entry value.
+ @param numPartitions the total number of partitions.
+ @return the partition number for the <code>key</code>.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partitions the key space.
+
+ <p><code>Partitioner</code> controls the partitioning of the keys of the
+ intermediate map-outputs. The key (or a subset of the key) is used to derive
+ the partition, typically by a hash function. The total number of partitions
+ is the same as the number of reduce tasks for the job. Hence this controls
+ which of the <code>m</code> reduce tasks the intermediate key (and hence the
+ record) is sent to for reduction.</p>
+
+ @see Reducer
+ @deprecated Use {@link org.apache.hadoop.mapreduce.Partitioner} instead.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Partitioner -->
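+ <!-- Illustrative sketch (not part of the generated API record): a partitioner
+      that hashes the whole key, mirroring the behaviour of the library's
+      HashPartitioner. The class name ModHashPartitioner is hypothetical.
+
+      import org.apache.hadoop.mapred.JobConf;
+      import org.apache.hadoop.mapred.Partitioner;
+
+      public class ModHashPartitioner<K, V> implements Partitioner<K, V> {
+        public void configure(JobConf job) { }  // required by JobConfigurable
+
+        public int getPartition(K key, V value, int numPartitions) {
+          // mask the sign bit so the result of the modulus is never negative
+          return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
+        }
+      }
+ -->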
+ <!-- start interface org.apache.hadoop.mapred.RawKeyValueIterator -->
+ <interface name="RawKeyValueIterator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getKey" return="org.apache.hadoop.io.DataInputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw key.
+
+ @return the current raw key as a DataInputBuffer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getValue" return="org.apache.hadoop.io.DataInputBuffer"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the current raw value.
+
+ @return the current raw value as a DataInputBuffer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets up the current key and value (for getKey and getValue).
+
+ @return <code>true</code> if there exists a key/value,
+ <code>false</code> otherwise.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the iterator so that the underlying streams can be closed.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="org.apache.hadoop.util.Progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Gets the Progress object; this has a float (0.0 - 1.0)
+ indicating the fraction of bytes processed by the iterator so far]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RawKeyValueIterator</code> is an iterator used to iterate over
+ the raw keys and values during sort/merge of intermediate data.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RawKeyValueIterator -->
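+ <!-- Illustrative sketch (not part of the generated API record): the consumption
+      pattern the interface implies. The helper class and method names are
+      hypothetical.
+
+      import java.io.IOException;
+      import org.apache.hadoop.io.DataInputBuffer;
+      import org.apache.hadoop.mapred.RawKeyValueIterator;
+
+      final class IterUtil {
+        static long drain(RawKeyValueIterator iter) throws IOException {
+          long records = 0;
+          while (iter.next()) {         // positions the current key/value
+            DataInputBuffer key = iter.getKey();
+            DataInputBuffer value = iter.getValue();
+            // key.getData() / value.getData() expose the raw serialized bytes;
+            // key.getLength() bounds the valid region of the buffer
+            records++;
+          }
+          iter.close();                 // releases the underlying streams
+          return records;
+        }
+      }
+ -->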
+ <!-- start interface org.apache.hadoop.mapred.RecordReader -->
+ <interface name="RecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Reads the next key/value pair from the input for processing.
+
+ @param key the key to read data into
+ @param value the value to read data into
+ @return true iff a key/value was read, false if at EOF]]>
+ </doc>
+ </method>
+ <method name="createKey" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a key.
+
+ @return a new key object.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an object of the appropriate type to be used as a value.
+
+ @return a new value object.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current position in the input.
+
+ @return the current position in the input.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this {@link RecordReader} to future operations.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[How much of the input has the {@link RecordReader} consumed, i.e.
+ how much has been processed?
+
+ @return progress from <code>0.0</code> to <code>1.0</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordReader</code> reads &lt;key, value&gt; pairs from an
+ {@link InputSplit}.
+
+ <p><code>RecordReader</code>, typically, converts the byte-oriented view of
+ the input, provided by the <code>InputSplit</code>, and presents a
+ record-oriented view for the {@link Mapper} & {@link Reducer} tasks for
+ processing. It thus assumes the responsibility of processing record
+ boundaries and presenting the tasks with keys and values.</p>
+
+ @see InputSplit
+ @see InputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordReader -->
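+ <!-- Illustrative sketch (not part of the generated API record): the driving loop
+      a caller uses with a RecordReader. Note that next() reuses the key and value
+      objects created up front. The helper class name is hypothetical.
+
+      import java.io.IOException;
+      import org.apache.hadoop.mapred.RecordReader;
+
+      final class ReaderUtil {
+        static <K, V> long countRecords(RecordReader<K, V> reader) throws IOException {
+          K key = reader.createKey();        // allocate once ...
+          V value = reader.createValue();
+          long n = 0;
+          while (reader.next(key, value)) {  // ... and let next() refill them
+            n++;
+          }
+          reader.close();
+          return n;
+        }
+      }
+ -->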
+ <!-- start interface org.apache.hadoop.mapred.RecordWriter -->
+ <interface name="RecordWriter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes a key/value pair.
+
+ @param key the key to write.
+ @param value the value to write.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close this <code>RecordWriter</code> to future operations.
+
+ @param reporter facility to report progress.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordWriter</code> writes the output &lt;key, value&gt; pairs
+ to an output file.
+
+ <p><code>RecordWriter</code> implementations write the job outputs to the
+ {@link FileSystem}.</p>
+
+ @see OutputFormat]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RecordWriter -->
+ <!-- start interface org.apache.hadoop.mapred.Reducer -->
+ <interface name="Reducer" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.Reducer} instead.">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <implements name="org.apache.hadoop.io.Closeable"/>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="values" type="java.util.Iterator"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[<i>Reduces</i> values for a given key.
+
+ <p>The framework calls this method for each
+ <code>&lt;key, (list of values)></code> pair in the grouped inputs.
+ Output values must be of the same type as input values. Input keys must
+ not be altered. The framework will <b>reuse</b> the key and value objects
+ that are passed into the reduce, therefore the application should clone
+ the objects they want to keep a copy of. In many cases, all values are
+ combined into zero or one value.
+ </p>
+
+ <p>Output pairs are collected with calls to
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress
+ or just indicate that they are alive. In scenarios where the application
+ takes a significant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has
+ timed-out and kill that task. The other way of avoiding this is to set
+ <a href="{@docRoot}/../mapred-default.html#mapred.task.timeout">
+ mapred.task.timeout</a> to a high-enough value (or even zero for no
+ time-outs).</p>
+
+ @param key the key.
+ @param values the list of values to reduce.
+ @param output to collect keys and combined values.
+ @param reporter facility to report progress.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reduces a set of intermediate values which share a key to a smaller set of
+ values.
+
+ <p>The number of <code>Reducer</code>s for the job is set by the user via
+ {@link JobConf#setNumReduceTasks(int)}. <code>Reducer</code> implementations
+ can access the {@link JobConf} for the job via the
+ {@link JobConfigurable#configure(JobConf)} method and initialize themselves.
+ Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p><code>Reducer</code> has 3 primary phases:</p>
+ <ol>
+ <li>
+
+ <h4 id="Shuffle">Shuffle</h4>
+
+ <p>The input to the <code>Reducer</code> is the grouped output of a {@link Mapper}.
+ In this phase the framework, for each <code>Reducer</code>, fetches the
+ relevant partition of the output of all the <code>Mapper</code>s, via HTTP.
+ </p>
+ </li>
+
+ <li>
+ <h4 id="Sort">Sort</h4>
+
+ <p>The framework groups <code>Reducer</code> inputs by <code>key</code>s
+ (since different <code>Mapper</code>s may have output the same key) in this
+ stage.</p>
+
+ <p>The shuffle and sort phases occur simultaneously i.e. while outputs are
+ being fetched they are merged.</p>
+
+ <h5 id="SecondarySort">SecondarySort</h5>
+
+ <p>If equivalence rules for keys while grouping the intermediates are
+ different from those for grouping keys before reduction, then one may
+ specify a <code>Comparator</code> via
+ {@link JobConf#setOutputValueGroupingComparator(Class)}. Since
+ {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to
+ control how intermediate keys are sorted, these can be used in conjunction
+ to simulate <i>secondary sort on values</i>.</p>
+
+
+ For example, say that you want to find duplicate web pages and tag them
+ all with the url of the "best" known example. You would set up the job
+ like:
+ <ul>
+ <li>Map Input Key: url</li>
+ <li>Map Input Value: document</li>
+ <li>Map Output Key: document checksum, url pagerank</li>
+ <li>Map Output Value: url</li>
+ <li>Partitioner: by checksum</li>
+ <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
+ <li>OutputValueGroupingComparator: by checksum</li>
+ </ul>
+ </li>
+
+ <li>
+ <h4 id="Reduce">Reduce</h4>
+
+ <p>In this phase the
+ {@link #reduce(Object, Iterator, OutputCollector, Reporter)}
+ method is called for each <code>&lt;key, (list of values)></code> pair in
+ the grouped inputs.</p>
+ <p>The output of the reduce task is typically written to the
+ {@link FileSystem} via
+ {@link OutputCollector#collect(Object, Object)}.</p>
+ </li>
+ </ol>
+
+ <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class MyReducer&lt;K extends WritableComparable, V extends Writable&gt;
+ extends MapReduceBase implements Reducer&lt;K, V, K, V&gt; {
+
+ static enum MyCounters { NUM_RECORDS }
+
+ private String reduceTaskId;
+ private int noKeys = 0;
+
+ public void configure(JobConf job) {
+ reduceTaskId = job.get("mapred.task.id");
+ }
+
+ public void reduce(K key, Iterator&lt;V&gt; values,
+ OutputCollector&lt;K, V&gt; output,
+ Reporter reporter)
+ throws IOException {
+
+ // Process
+ int noValues = 0;
+ while (values.hasNext()) {
+ V value = values.next();
+
+ // Increment the no. of values for this key
+ ++noValues;
+
+ // Process the &lt;key, value&gt; pair (assume this takes a while)
+ // ...
+ // ...
+
+ // Let the framework know that we are alive, and kicking!
+ if ((noValues%10) == 0) {
+ reporter.progress();
+ }
+
+ // Process some more
+ // ...
+ // ...
+
+ // Output the &lt;key, value&gt;
+ output.collect(key, value);
+ }
+
+ // Increment the no. of &lt;key, list of values&gt; pairs processed
+ ++noKeys;
+
+ // Increment counters
+ reporter.incrCounter(NUM_RECORDS, 1);
+
+ // Every 100 keys update application-level status
+ if ((noKeys%100) == 0) {
+ reporter.setStatus(reduceTaskId + " processed " + noKeys);
+ }
+ }
+ }
+ </pre></blockquote></p>
+
+ @see Mapper
+ @see Partitioner
+ @see Reporter
+ @see MapReduceBase
+ @deprecated Use {@link org.apache.hadoop.mapreduce.Reducer} instead.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reducer -->
+ <!-- start interface org.apache.hadoop.mapred.Reporter -->
+ <interface name="Reporter" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Progressable"/>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the status description for the task.
+
+ @param status brief description of the current status.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Get the {@link Counter} of the given group with the given name.
+
+ @param name counter name
+ @return the <code>Counter</code> of the given group/name.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Get the {@link Counter} of the given group with the given name.
+
+ @param group counter group
+ @param name counter name
+ @return the <code>Counter</code> of the given group/name.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the key, which can be of
+ any {@link Enum} type, by the specified amount.
+
+ @param key key to identify the counter to be incremented. The key can
+ be any <code>Enum</code>.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="incrCounter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="counter" type="java.lang.String"/>
+ <param name="amount" type="long"/>
+ <doc>
+ <![CDATA[Increments the counter identified by the group and counter name
+ by the specified amount.
+
+ @param group name to identify the group of the counter to be incremented.
+ @param counter name to identify the counter within the group.
+ @param amount A non-negative amount by which the counter is to
+ be incremented.]]>
+ </doc>
+ </method>
+ <method name="getInputSplit" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+ <doc>
+ <![CDATA[Get the {@link InputSplit} object for a map.
+
+ @return the <code>InputSplit</code> that the map is reading from.
+ @throws UnsupportedOperationException if called outside a mapper]]>
+ </doc>
+ </method>
+ <field name="NULL" type="org.apache.hadoop.mapred.Reporter"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A constant of Reporter type that does nothing.]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A facility for Map-Reduce applications to report progress and update
+ counters, status information etc.
+
+ <p>{@link Mapper} and {@link Reducer} can use the <code>Reporter</code>
+ provided to report progress or just indicate that they are alive. In
+ scenarios where the application takes a significant amount of time to
+ process individual key/value pairs, this is crucial since the framework
+ might assume that the task has timed-out and kill that task.
+
+ <p>Applications can also update {@link Counters} via the provided
+ <code>Reporter</code>.</p>
+
+ @see Progressable
+ @see Counters]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.Reporter -->
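+ <!-- Illustrative sketch (not part of the generated API record): typical Reporter
+      usage inside a map or reduce implementation. The enum, the group/name
+      strings, and the method name are all hypothetical.
+
+      import org.apache.hadoop.mapred.Reporter;
+
+      final class ReporterUsage {
+        enum Quality { MALFORMED_RECORDS }
+
+        static void recordMalformed(Reporter reporter) {
+          reporter.incrCounter(Quality.MALFORMED_RECORDS, 1);  // enum-keyed counter
+          reporter.incrCounter("Quality", "malformed", 1);     // group/name counter
+          reporter.setStatus("skipping malformed record");
+          reporter.progress();  // keeps a slow task from being presumed dead
+        }
+      }
+ -->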
+ <!-- start interface org.apache.hadoop.mapred.RunningJob -->
+ <interface name="RunningJob" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the job identifier.
+
+ @return the job identifier.]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.">
+ <doc>
+ <![CDATA[@deprecated This method is deprecated and will be removed. Applications should
+ rather use {@link #getID()}.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the name of the job.
+
+ @return the name of the job.]]>
+ </doc>
+ </method>
+ <method name="getJobFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the path of the submitted job configuration.
+
+ @return the path of the submitted job configuration.]]>
+ </doc>
+ </method>
+ <method name="getTrackingURL" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the URL where some job progress information will be displayed.
+
+ @return the URL where some job progress information will be displayed.]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's map-tasks, as a float between 0.0
+ and 1.0. When all map tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's map-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0
+ and 1.0. When all reduce tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's reduce-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="cleanupProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's cleanup-tasks, as a float between 0.0
+ and 1.0. When all cleanup tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's cleanup-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setupProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's setup-tasks, as a float between 0.0
+ and 1.0. When all setup tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's setup-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isComplete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job is finished or not.
+ This is a non-blocking call.
+
+ @return <code>true</code> if the job is complete, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isSuccessful" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job completed successfully.
+
+ @return <code>true</code> if the job succeeded, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="waitForCompletion"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Blocks until the job is complete.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getJobState" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns the current state of the job; see {@link JobStatus}.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill the running job. Blocks until all job tasks have been
+ killed as well. If the job is no longer running, it simply returns.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setJobPriority"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="priority" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the priority of a running job.
+ @param priority the new priority for the job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="startFrom" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get events indicating completion (success/failure) of component tasks.
+
+ @param startFrom index to start fetching events from
+ @return an array of {@link TaskCompletionEvent}s
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill indicated task attempt.
+
+ @param taskId the id of the task to be terminated.
+ @param shouldFail if true the task is failed and added to the failed-tasks
+ list, otherwise it is just killed, without affecting
+ job failure status.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="Applications should rather use {@link #killTask(TaskAttemptID, boolean)}">
+ <param name="taskId" type="java.lang.String"/>
+ <param name="shouldFail" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the counters for this job.
+
+ @return the counters for this job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the diagnostic messages for a given task attempt.
+ @param taskid
+ @return the list of diagnostic messages for the task
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RunningJob</code> is the user-interface to query for details on a
+ running Map-Reduce job.
+
+ <p>Clients can get hold of <code>RunningJob</code> via the {@link JobClient}
+ and then query the running-job for details such as name, configuration,
+ progress etc.</p>
+
+ @see JobClient]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.RunningJob -->
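+ <!-- Illustrative sketch (not part of the generated API record): polling a
+      RunningJob obtained from JobClient.submitJob. The method name and the
+      five-second polling interval are hypothetical.
+
+      import java.io.IOException;
+      import org.apache.hadoop.mapred.*;
+
+      final class JobUtil {
+        static void submitAndWait(JobConf conf) throws IOException, InterruptedException {
+          JobClient client = new JobClient(conf);
+          RunningJob job = client.submitJob(conf);   // asynchronous submission
+          while (!job.isComplete()) {                // non-blocking status check
+            System.out.printf("map %.0f%% reduce %.0f%%%n",
+                job.mapProgress() * 100, job.reduceProgress() * 100);
+            Thread.sleep(5000);
+          }
+          if (!job.isSuccessful()) {
+            throw new IOException("job failed, see " + job.getTrackingURL());
+          }
+        }
+      }
+ -->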
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <class name="SequenceFileAsBinaryInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[InputFormat reading keys, values from SequenceFiles in binary (raw)
+ format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <class name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader"/>
+ <constructor name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.BytesWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getKeyClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the key class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName]]>
+ </doc>
+ </method>
+ <method name="getValueClassName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Retrieve the name of the value class for this SequenceFile.
+ @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.BytesWritable"/>
+ <param name="val" type="org.apache.hadoop.io.BytesWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read raw bytes from a SequenceFile.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split.
+ @return progress from 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Read records from a SequenceFile as binary (raw) bytes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
+ <class name="SequenceFileAsBinaryOutputFormat" extends="org.apache.hadoop.mapred.SequenceFileOutputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsBinaryOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setSequenceFileOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the key class for the {@link SequenceFile}.
+ <p>This allows the user to specify a key class different
+ from the actual class ({@link BytesWritable}) used for writing.</p>
+
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output key class.]]>
+ </doc>
+ </method>
+ <method name="setSequenceFileOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="theClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the value class for the {@link SequenceFile}.
+ <p>This allows the user to specify a value class different
+ from the actual class ({@link BytesWritable}) used for writing.</p>
+
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output value class.]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the key class for the {@link SequenceFile}
+
+ @return the key class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getSequenceFileOutputValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the value class for the {@link SequenceFile}
+
+ @return the value class of the {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes keys and values to
+ {@link SequenceFile}s in binary (raw) format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
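+ <!-- Illustrative sketch (not part of the generated API record): configuring the
+      format so the SequenceFile header declares logical key/value classes while
+      the job itself hands the writer raw BytesWritable payloads. Text and
+      LongWritable are arbitrary example choices.
+
+      import org.apache.hadoop.io.*;
+      import org.apache.hadoop.mapred.*;
+
+      JobConf job = new JobConf();
+      job.setOutputFormat(SequenceFileAsBinaryOutputFormat.class);
+      SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, Text.class);
+      SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, LongWritable.class);
+ -->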
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <class name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.SequenceFile.ValueBytes"/>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" type="org.apache.hadoop.io.BytesWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="value" type="org.apache.hadoop.io.BytesWritable"/>
+ </method>
+ <method name="writeUncompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="writeCompressedBytes"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="outStream" type="java.io.DataOutputStream"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSize" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Inner class used for appendRaw]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+ <class name="SequenceFileAsTextInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileAsTextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is similar to SequenceFileInputFormat, except that it generates a SequenceFileAsTextRecordReader,
+ which converts the input keys and values to their String forms by calling their toString() methods.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <class name="SequenceFileAsTextRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader"/>
+ <constructor name="SequenceFileAsTextRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="createKey" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read key/value pair in a line.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class converts the input keys and values to their String forms by calling their
+ toString() methods. This class is to SequenceFileAsTextInputFormat as LineRecordReader
+ is to TextInputFormat.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+ <class name="SequenceFileInputFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a record reader for the given split.
+ @param split file split
+ @param job job configuration
+ @param reporter reporter that sends reports to the task tracker
+ @return RecordReader]]>
+ </doc>
+ </method>
+ <method name="setFilterClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="filterClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the filter class.
+
+ @param conf application configuration
+ @param filterClass filter class]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A class that allows a map/reduce job to work on a sample of sequence files.
+ The sample is decided by the filter class set on the job.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter -->
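+ <!-- Illustrative sketch (not part of the generated API record): sampling an input
+      with the MD5 filter documented below. The frequency of 100 keeps roughly one
+      record in every hundred (those with MD5(key) % 100 == 0) and is an arbitrary
+      example value.
+
+      import org.apache.hadoop.mapred.*;
+
+      JobConf job = new JobConf();
+      job.setInputFormat(SequenceFileInputFilter.class);
+      SequenceFileInputFilter.setFilterClass(job,
+          SequenceFileInputFilter.MD5Filter.class);
+      SequenceFileInputFilter.MD5Filter.setFrequency(job, 100);
+ -->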
+ <!-- start interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <interface name="SequenceFileInputFilter.Filter" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Filter function.
+ Decides whether a record should be accepted or filtered out.
+ @param key record key
+ @return true if the record is accepted; false otherwise]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[filter interface]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <class name="SequenceFileInputFilter.FilterBase" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.SequenceFileInputFilter.Filter"/>
+ <constructor name="SequenceFileInputFilter.FilterBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[base class for Filters]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
+ <class name="SequenceFileInputFilter.MD5Filter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.MD5Filter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+ <![CDATA[set the filtering frequency in configuration
+
+ @param conf configuration
+ @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[configure the filter according to configuration
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Filtering method.
+ If MD5(key) % frequency == 0, return true; otherwise return false.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <field name="MD5_LEN" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+    <![CDATA[This class returns a set of records by examining the MD5 digest of each
+ key against a filtering frequency <i>f</i>. The filtering criterion is
+ MD5(key) % f == 0.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <class name="SequenceFileInputFilter.PercentFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.PercentFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFrequency"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="frequency" type="int"/>
+ <doc>
+      <![CDATA[Set the frequency and store it in conf.
+ @param conf configuration
+ @param frequency filtering frequency]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[configure the filter by checking the configuration
+
+ @param conf configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Filtering method.
+ If record# % frequency == 0, return true; otherwise return false.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[This class returns a percentage of records.
+ The percentage is determined by a filtering frequency <i>f</i> using
+ the criterion record# % f == 0.
+ For example, if the frequency is 10, one out of every 10 records is returned.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
+ <class name="SequenceFileInputFilter.RegexFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFilter.RegexFilter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setPattern"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="regex" type="java.lang.String"/>
+ <exception name="PatternSyntaxException" type="java.util.regex.PatternSyntaxException"/>
+ <doc>
+      <![CDATA[Define the filtering regex and store it in conf.
+ @param conf where the regex is set
+ @param regex regex used as a filter]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[configure the Filter by checking the configuration]]>
+ </doc>
+ </method>
+ <method name="accept" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Filtering method.
+ If the key matches the regex, return true; otherwise return false.
+ @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
+ </doc>
+ </method>
+ <doc>
+    <![CDATA[A record filter that matches keys against a regex.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
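+  <!-- A companion sketch, assuming the RegexFilter API above (the pattern
+       string is illustrative): accept only records whose key matches a regex.
+
+       import org.apache.hadoop.mapred.JobConf;
+       import org.apache.hadoop.mapred.SequenceFileInputFilter;
+
+       public class RegexSampledInput {
+         public static void main(String[] args) {
+           JobConf job = new JobConf(RegexSampledInput.class);
+           job.setInputFormat(SequenceFileInputFilter.class);
+           SequenceFileInputFilter.setFilterClass(job,
+               SequenceFileInputFilter.RegexFilter.class);
+           // setPattern throws PatternSyntaxException on a malformed regex.
+           SequenceFileInputFilter.RegexFilter.setPattern(job, "^user_.*");
+         }
+       }
+  -->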
+ <!-- start class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use
+ {@link org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat}
+ instead.">
+ <constructor name="SequenceFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for {@link SequenceFile}s.
+ @deprecated Use
+ {@link org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat}
+ instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
+ <class name="SequenceFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use
+ {@link org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat}
+ instead.">
+ <constructor name="SequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getReaders" return="org.apache.hadoop.io.SequenceFile.Reader[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Open the output generated by this format.]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf}
+ @return the {@link CompressionType} for the output {@link SequenceFile},
+ defaulting to {@link CompressionType#RECORD}]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf} to modify
+ @param style the {@link CompressionType} for the output
+ {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link SequenceFile}s.
+ @deprecated Use
+ {@link org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat}
+ instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
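+  <!-- A short sketch, assuming the API above: emit compressed sequence files
+       by selecting the output format and a CompressionType (BLOCK here;
+       RECORD is the documented default).
+
+       import org.apache.hadoop.io.SequenceFile;
+       import org.apache.hadoop.mapred.JobConf;
+       import org.apache.hadoop.mapred.SequenceFileOutputFormat;
+
+       public class CompressedOutput {
+         public static void main(String[] args) {
+           JobConf job = new JobConf(CompressedOutput.class);
+           job.setOutputFormat(SequenceFileOutputFormat.class);
+           SequenceFileOutputFormat.setOutputCompressionType(job,
+               SequenceFile.CompressionType.BLOCK);
+         }
+       }
+  -->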
+ <!-- start class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+ <class name="SequenceFileRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader"/>
+ <constructor name="SequenceFileRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="getKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of key that must be passed to {@link
+ #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="getValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The class of value that must be passed to {@link
+ #next(Object, Object)}.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentValue"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="value" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="seek"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="pos" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="conf" type="org.apache.hadoop.conf.Configuration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+    <![CDATA[A {@link RecordReader} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SequenceFileRecordReader -->
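+  <!-- A minimal read loop, assuming the API above (the file path is
+       illustrative): iterate over a whole sequence file as one split.
+
+       import org.apache.hadoop.conf.Configuration;
+       import org.apache.hadoop.fs.Path;
+       import org.apache.hadoop.mapred.FileSplit;
+       import org.apache.hadoop.mapred.SequenceFileRecordReader;
+
+       public class DumpSplit {
+         public static void main(String[] args) throws Exception {
+           Configuration conf = new Configuration();
+           FileSplit split = new FileSplit(new Path("part-00000"),
+               0L, Long.MAX_VALUE, new String[0]);
+           SequenceFileRecordReader reader =
+               new SequenceFileRecordReader(conf, split);
+           Object key = reader.createKey();
+           Object value = reader.createValue();
+           while (reader.next(key, value)) {   // next() reuses the objects
+             System.out.println(key + "\t" + value);
+           }
+           reader.close();
+         }
+       }
+  -->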
+ <!-- start class org.apache.hadoop.mapred.SkipBadRecords -->
+ <class name="SkipBadRecords" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SkipBadRecords"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getAttemptsToStartSkipping" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Get the number of task attempts AFTER which skip mode
+ will be kicked off. When skip mode is kicked off, the
+ task reports the range of records which it will process
+ next to the TaskTracker, so that on failures the TaskTracker
+ knows which records are possibly the bad ones. On further
+ executions, those are skipped.
+ Default value is 2.
+
+ @param conf the configuration
+ @return the number of task attempts after which skipping starts]]>
+ </doc>
+ </method>
+ <method name="setAttemptsToStartSkipping"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="attemptsToStartSkipping" type="int"/>
+ <doc>
+      <![CDATA[Set the number of task attempts AFTER which skip mode
+ will be kicked off. When skip mode is kicked off, the
+ task reports the range of records which it will process
+ next to the TaskTracker, so that on failures the TaskTracker
+ knows which records are possibly the bad ones. On further
+ executions, those are skipped.
+ Default value is 2.
+
+ @param conf the configuration
+ @param attemptsToStartSkipping number of task attempts]]>
+ </doc>
+ </method>
+ <method name="getAutoIncrMapperProcCount" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Get the flag which, if set to true, causes
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} to be incremented
+ by MapRunner after invoking the map function. This value must be set to
+ false for applications which process records asynchronously
+ or buffer the input records, for example streaming;
+ in such cases applications should increment this counter on their own.
+ Default value is true.
+
+ @param conf the configuration
+ @return <code>true</code> if auto increment
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}.
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setAutoIncrMapperProcCount"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="autoIncr" type="boolean"/>
+ <doc>
+      <![CDATA[Set the flag which, if set to true, causes
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} to be incremented
+ by MapRunner after invoking the map function. This value must be set to
+ false for applications which process records asynchronously
+ or buffer the input records, for example streaming;
+ in such cases applications should increment this counter on their own.
+ Default value is true.
+
+ @param conf the configuration
+ @param autoIncr whether to auto increment
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}.]]>
+ </doc>
+ </method>
+ <method name="getAutoIncrReducerProcCount" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Get the flag which, if set to true, causes
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} to be incremented
+ by the framework after invoking the reduce function. This value must be set
+ to false for applications which process records asynchronously
+ or buffer the input records, for example streaming;
+ in such cases applications should increment this counter on their own.
+ Default value is true.
+
+ @param conf the configuration
+ @return <code>true</code> if auto increment
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}.
+ <code>false</code> otherwise.]]>
+ </doc>
+ </method>
+ <method name="setAutoIncrReducerProcCount"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="autoIncr" type="boolean"/>
+ <doc>
+      <![CDATA[Set the flag which, if set to true, causes
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} to be incremented
+ by the framework after invoking the reduce function. This value must be set
+ to false for applications which process records asynchronously
+ or buffer the input records, for example streaming;
+ in such cases applications should increment this counter on their own.
+ Default value is true.
+
+ @param conf the configuration
+ @param autoIncr whether to auto increment
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}.]]>
+ </doc>
+ </method>
+ <method name="getSkipOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Get the directory to which skipped records are written. By default it is
+ a subdirectory of the output _logs directory.
+ Users can stop the writing of skipped records by setting the value to null.
+
+ @param conf the configuration.
+ @return the skip output directory, or null if neither this nor the
+ output directory is set.]]>
+ </doc>
+ </method>
+ <method name="setSkipOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+      <![CDATA[Set the directory to which skipped records are written. By default it is
+ a subdirectory of the output _logs directory.
+ Users can stop the writing of skipped records by setting the value to null.
+
+ @param conf the configuration.
+ @param path skip output directory path]]>
+ </doc>
+ </method>
+ <method name="getMapperMaxSkipRecords" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Get the number of acceptable skip records surrounding each bad record
+ in the mapper. The number includes the bad record as well.
+ To turn off the detection/skipping of bad records, set the
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts get exhausted for this task.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not
+ try to narrow down; whatever records get skipped (application-dependent)
+ are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @return the number of acceptable skip records.]]>
+ </doc>
+ </method>
+ <method name="setMapperMaxSkipRecords"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="maxSkipRecs" type="long"/>
+ <doc>
+      <![CDATA[Set the number of acceptable skip records surrounding each bad record
+ in the mapper. The number includes the bad record as well.
+ To turn off the detection/skipping of bad records, set the
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts get exhausted for this task.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not
+ try to narrow down; whatever records get skipped (application-dependent)
+ are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @param maxSkipRecs the number of acceptable skip records.]]>
+ </doc>
+ </method>
+ <method name="getReducerMaxSkipGroups" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+      <![CDATA[Get the number of acceptable skip groups surrounding each bad group
+ in the reducer. The number includes the bad group as well.
+ To turn off the detection/skipping of bad groups, set the
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts get exhausted for this task.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not
+ try to narrow down; whatever groups get skipped (application-dependent)
+ are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @return the number of acceptable skip groups.]]>
+ </doc>
+ </method>
+ <method name="setReducerMaxSkipGroups"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="maxSkipGrps" type="long"/>
+ <doc>
+      <![CDATA[Set the number of acceptable skip groups surrounding each bad group
+ in the reducer. The number includes the bad group as well.
+ To turn off the detection/skipping of bad groups, set the
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying
+ until this threshold is met OR all attempts get exhausted for this task.
+ Set the value to Long.MAX_VALUE to indicate that the framework need not
+ try to narrow down; whatever groups get skipped (application-dependent)
+ are acceptable.
+ Default value is 0.
+
+ @param conf the configuration
+ @param maxSkipGrps the number of acceptable skip groups.]]>
+ </doc>
+ </method>
+ <field name="COUNTER_GROUP" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Special counters which are written by the application and are
+ used by the framework for detecting bad records. For detecting bad records
+ these counters must be incremented by the application.]]>
+ </doc>
+ </field>
+ <field name="COUNTER_MAP_PROCESSED_RECORDS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of processed map records.
+ @see SkipBadRecords#getAutoIncrMapperProcCount(Configuration)]]>
+ </doc>
+ </field>
+ <field name="COUNTER_REDUCE_PROCESSED_GROUPS" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Number of processed reduce groups.
+ @see SkipBadRecords#getAutoIncrReducerProcCount(Configuration)]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[Utility class for skip bad records functionality. It contains various
+ settings related to skipping of bad records.
+
+ <p>Hadoop provides an optional mode of execution in which the bad records
+ are detected and skipped in further attempts.
+
+ <p>This feature can be used when map/reduce tasks crash deterministically on
+ certain input. This happens due to bugs in the map/reduce function. The usual
+ course would be to fix these bugs. But sometimes this is not possible;
+ perhaps the bug is in third-party libraries for which the source code is
+ not available. The task then never reaches completion even with
+ multiple attempts, and the complete data for that task is lost.</p>
+
+ <p>With this feature, only a small portion of data surrounding the bad
+ record is lost, which may be acceptable for some user applications;
+ see {@link SkipBadRecords#setMapperMaxSkipRecords(Configuration, long)}.</p>
+
+ <p>The skipping mode gets kicked off after a certain number of failures;
+ see {@link SkipBadRecords#setAttemptsToStartSkipping(Configuration, int)}.</p>
+
+ <p>In skipping mode, the map/reduce task maintains the record range currently
+ being processed. Before giving the input to the
+ map/reduce function, it sends this record range to the TaskTracker.
+ If the task crashes, the TaskTracker knows which was the last reported
+ range; on further attempts that range gets skipped.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.SkipBadRecords -->
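+  <!-- A configuration sketch, assuming the setters above (the values are
+       illustrative): turn on bad-record skipping for a job.
+
+       import org.apache.hadoop.mapred.JobConf;
+       import org.apache.hadoop.mapred.SkipBadRecords;
+
+       public class SkippingSetup {
+         public static void main(String[] args) {
+           JobConf conf = new JobConf(SkippingSetup.class);
+           // Enter skip mode after 2 failed attempts (the documented default).
+           SkipBadRecords.setAttemptsToStartSkipping(conf, 2);
+           // Allow at most one skipped record/group per bad one;
+           // 0 (the default) disables the feature.
+           SkipBadRecords.setMapperMaxSkipRecords(conf, 1L);
+           SkipBadRecords.setReducerMaxSkipGroups(conf, 1L);
+         }
+       }
+  -->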
+ <!-- start class org.apache.hadoop.mapred.TaskAttemptContext -->
+ <class name="TaskAttemptContext" extends="org.apache.hadoop.mapreduce.TaskAttemptContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.TaskAttemptContext}
+ instead.">
+ <method name="getTaskAttemptID" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the taskAttemptID.
+
+ @return TaskAttemptID]]>
+ </doc>
+ </method>
+ <method name="getProgressible" return="org.apache.hadoop.util.Progressable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[@deprecated Use {@link org.apache.hadoop.mapreduce.TaskAttemptContext}
+ instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskAttemptContext -->
+ <!-- start class org.apache.hadoop.mapred.TaskAttemptID -->
+ <class name="TaskAttemptID" extends="org.apache.hadoop.mapreduce.TaskAttemptID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskAttemptID" type="org.apache.hadoop.mapred.TaskID, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from given {@link TaskID}.
+ @param taskId TaskID that this task belongs to
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskAttemptID" type="java.lang.String, int, boolean, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Constructs a TaskAttemptID object from its constituent parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param taskId taskId number
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskAttemptID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="downgrade" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="old" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Downgrade a new TaskAttemptID to an old one
+ @param old the new id
+ @return either old or a new TaskAttemptID constructed to match old]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a TaskAttemptID object from given string
+ @return constructed TaskAttemptID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <param name="attemptId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task attempt IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>all task attempt IDs</i>
+ of <i>any jobtracker</i>, in <i>any job</i>, of the <i>first
+ map task</i>, we would use:
+ <pre>
+ TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+ </pre>
+ which will return:
+ <pre> "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @param attemptId the task attempt number, or null
+ @return a regex pattern matching TaskAttemptIDs]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[TaskAttemptID represents the immutable and unique identifier for
+ a task attempt. Each task attempt is one particular instance of a Map or
+ Reduce Task identified by its TaskID.
+
+ A TaskAttemptID consists of two parts. The first part is the
+ {@link TaskID} that this TaskAttemptID belongs to.
+ The second part is the task attempt number. <br>
+ An example TaskAttemptID is
+ <code>attempt_200707121733_0003_m_000005_0</code>, which represents the
+ zeroth task attempt of the fifth map task in the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskAttemptID strings,
+ but rather use the appropriate constructors or the {@link #forName(String)}
+ method.
+
+ @see JobID
+ @see TaskID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskAttemptID -->
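+  <!-- A parsing sketch, assuming the API above and reusing the example ID
+       from the class doc:
+
+       import org.apache.hadoop.mapred.TaskAttemptID;
+
+       public class AttemptIds {
+         public static void main(String[] args) {
+           TaskAttemptID id =
+               TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
+           System.out.println(id.getTaskID());
+           // Regex over all attempts of the first map task, in any job:
+           System.out.println(
+               TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null));
+         }
+       }
+  -->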
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent -->
+ <class name="TaskCompletionEvent" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskCompletionEvent"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default constructor for Writable.]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskCompletionEvent" type="int, org.apache.hadoop.mapred.TaskAttemptID, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor. eventId should be created externally and incremented
+ per event for each job.
+ @param eventId event id; event ids should be unique and assigned
+ incrementally, starting from 0.
+ @param taskId task id
+ @param status task's status
+ @param taskTrackerHttp task tracker's host:port for http.]]>
+ </doc>
+ </constructor>
+ <method name="getEventId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns event Id.
+ @return event id]]>
+ </doc>
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskAttemptId()} instead.">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id
+ @deprecated use {@link #getTaskAttemptId()} instead.]]>
+ </doc>
+ </method>
+ <method name="getTaskAttemptId" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns task id.
+ @return task id]]>
+ </doc>
+ </method>
+ <method name="getTaskStatus" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Returns the task completion status, e.g. Status.SUCCEEDED or Status.FAILED.
+ @return task completion status]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerHttp" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[http location of the tasktracker where this task ran.
+ @return http location of tasktracker user logs]]>
+ </doc>
+ </method>
+ <method name="getTaskRunTime" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns time (in millisec) the task took to complete.]]>
+ </doc>
+ </method>
+ <method name="setTaskRunTime"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskCompletionTime" type="int"/>
+ <doc>
+ <![CDATA[Set the task completion time
+ @param taskCompletionTime time (in millisec) the task took to complete]]>
+ </doc>
+ </method>
+ <method name="setEventId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="eventId" type="int"/>
+ <doc>
+      <![CDATA[Set the event id. Event ids should be assigned incrementally, starting from 0.
+ @param eventId]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setTaskID(TaskAttemptID)} instead.">
+ <param name="taskId" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId
+ @deprecated use {@link #setTaskID(TaskAttemptID)} instead.]]>
+ </doc>
+ </method>
+ <method name="setTaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Sets task id.
+ @param taskId]]>
+ </doc>
+ </method>
+ <method name="setTaskStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"/>
+ <doc>
+ <![CDATA[Set task status.
+ @param status]]>
+ </doc>
+ </method>
+ <method name="setTaskTrackerHttp"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTrackerHttp" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set task tracker http location.
+ @param taskTrackerHttp]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="isMapTask" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="idWithinJob" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="EMPTY_ARRAY" type="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This is used to track task completion events on
+ the job tracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent -->
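+  <!-- A polling sketch, assuming the accessors above plus a RunningJob
+       handle obtained elsewhere in this API (e.g. from JobClient.submitJob):
+       report failed attempts and where their logs live.
+
+       import java.io.IOException;
+       import org.apache.hadoop.mapred.RunningJob;
+       import org.apache.hadoop.mapred.TaskCompletionEvent;
+
+       public class EventPoller {
+         static void printFailures(RunningJob job) throws IOException {
+           for (TaskCompletionEvent e : job.getTaskCompletionEvents(0)) {
+             if (e.getTaskStatus() == TaskCompletionEvent.Status.FAILED) {
+               System.out.println(e.getTaskAttemptId()
+                   + " logs: " + e.getTaskTrackerHttp());
+             }
+           }
+         }
+       }
+  -->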
+ <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <class name="TaskCompletionEvent.Status" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="FAILED" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="KILLED" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="SUCCEEDED" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="OBSOLETE" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="TIPFAILED" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+ <!-- start class org.apache.hadoop.mapred.TaskGraphServlet -->
+ <class name="TaskGraphServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskGraphServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="width" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[width of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="height" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[height of the graph w/o margins]]>
+ </doc>
+ </field>
+ <field name="ymargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on y axis]]>
+ </doc>
+ </field>
+ <field name="xmargin" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[margin space on x axis]]>
+ </doc>
+ </field>
+ <doc>
+    <![CDATA[The servlet that outputs SVG graphics for map/reduce task
+ statuses.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskGraphServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskID -->
+ <class name="TaskID" extends="org.apache.hadoop.mapreduce.TaskID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskID" type="org.apache.hadoop.mapreduce.JobID, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from given {@link JobID}.
+ @param jobId JobID that this tip belongs to
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskID" type="java.lang.String, int, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Constructs a TaskID object from its constituent parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="downgrade" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="old" type="org.apache.hadoop.mapreduce.TaskID"/>
+ <doc>
+ <![CDATA[Downgrade a new TaskID to an old one
+ @param old a new or old TaskID
+ @return either old or a new TaskID built to match old]]>
+ </doc>
+ </method>
+ <method name="read" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskIDsPattern" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jtIdentifier" type="java.lang.String"/>
+ <param name="jobId" type="java.lang.Integer"/>
+ <param name="isMap" type="java.lang.Boolean"/>
+ <param name="taskId" type="java.lang.Integer"/>
+ <doc>
+ <![CDATA[Returns a regex pattern which matches task IDs. Arguments can
+ be given null, in which case that part of the regex will be generic.
+ For example, to obtain a regex matching <i>the first map task</i>
+ of <i>any jobtracker</i>, of <i>any job</i>, we would use:
+ <pre>
+ TaskID.getTaskIDsPattern(null, null, true, 1);
+ </pre>
+ which will return:
+ <pre> "task_[^_]*_[0-9]*_m_000001*" </pre>
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null
+ @param taskId taskId number, or null
+ @return a regex pattern matching TaskIDs]]>
+ </doc>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ </method>
+ <doc>
+ <![CDATA[TaskID represents the immutable and unique identifier for
+ a Map or Reduce Task. Each TaskID encompasses multiple attempts made to
+ execute the Map or Reduce Task, each of which is uniquely identified by
+ its TaskAttemptID.
+
+ A TaskID consists of three parts. The first part is the {@link JobID} that
+ this task belongs to. The second part is either 'm' or 'r',
+ representing whether the task is a map task or a reduce task.
+ The third part is the task number. <br>
+ An example TaskID is
+ <code>task_200707121733_0003_m_000005</code>, which represents the
+ fifth map task in the third job running at the jobtracker
+ started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskID strings,
+ but rather use the appropriate constructors or the {@link #forName(String)}
+ method.
+
+ @see JobID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskID -->
+ <!-- start class org.apache.hadoop.mapred.TaskLog -->
+ <class name="TaskLog" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLog"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskLogFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="getRealTaskLogFileLocation" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/>
+ </method>
+ <method name="getIndexFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ </method>
+ <method name="getIndexFile" return="java.io.File"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="java.lang.String"/>
+ <param name="isCleanup" type="boolean"/>
+ </method>
+ <method name="syncLogs"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="firstTaskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="syncLogs"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="firstTaskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="isCleanup" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logsRetainHours" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Purge old user logs.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskLogLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the desired maximum length of task's logs.
+ @param conf the job to look in
+ @return the number of bytes to cap the log files at]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ If the tailLength is 0, the entire output will be saved.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="setup" type="java.util.List"/>
+ <param name="cmd" type="java.util.List"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ Setup commands, such as setting a memory limit, can be passed and
+ will be executed before exec.
+ If the tailLength is 0, the entire output will be saved.
+ @param setup The setup commands for the execed process.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="captureOutAndError" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="setup" type="java.util.List"/>
+ <param name="cmd" type="java.util.List"/>
+ <param name="stdoutFilename" type="java.io.File"/>
+ <param name="stderrFilename" type="java.io.File"/>
+ <param name="tailLength" type="long"/>
+ <param name="pidFileName" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture stdout and stderr to files.
+ Setup commands, such as setting a memory limit, can be passed and
+ will be executed before exec.
+ If the tailLength is 0, the entire output will be saved.
+ @param setup The setup commands for the execed process.
+ @param cmd The command and the arguments that should be run
+ @param stdoutFilename The filename that stdout should be saved to
+ @param stderrFilename The filename that stderr should be saved to
+ @param tailLength The length of the tail to be saved.
+ @param pidFileName The name of the pid-file
+ @return the modified command that should be run]]>
+ </doc>
+ </method>
+ <method name="addCommand" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List"/>
+ <param name="isExecutable" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Add quotes to each of the command strings and
+ return them as a single string.
+ @param cmd The command to be quoted
+ @param isExecutable makes a shell path if the first
+ argument is executable
+ @return the quoted string.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="captureDebugOut" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cmd" type="java.util.List"/>
+ <param name="debugoutFilename" type="java.io.File"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Wrap a command in a shell to capture debug script's
+ stdout and stderr to debugout.
+ @param cmd The command and the arguments that should be run
+ @param debugoutFilename The filename that stdout and stderr
+ should be saved to.
+ @return the modified command that should be run
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A simple logger to handle the task-specific user logs.
+ This class uses the system property <code>hadoop.log.dir</code>.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog -->
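+  <!-- A wrapping sketch, assuming the API above (the child command and file
+       names are illustrative): build a shell command that captures stdout
+       and stderr; a tailLength of 0 keeps the entire output.
+
+       import java.io.File;
+       import java.io.IOException;
+       import java.util.Arrays;
+       import java.util.List;
+       import org.apache.hadoop.mapred.TaskLog;
+
+       public class WrapCommand {
+         public static void main(String[] args) throws IOException {
+           List<String> cmd = Arrays.asList("/bin/echo", "hello");
+           List wrapped = TaskLog.captureOutAndError(cmd,
+               new File("stdout.log"), new File("stderr.log"), 0L);
+           System.out.println(wrapped);
+         }
+       }
+  -->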
+ <!-- start class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <class name="TaskLog.LogName" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TaskLog.LogName[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TaskLog.LogName"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="STDOUT" type="org.apache.hadoop.mapred.TaskLog.LogName"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Log on the stdout of the task.]]>
+ </doc>
+ </field>
+ <field name="STDERR" type="org.apache.hadoop.mapred.TaskLog.LogName"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Log on the stderr of the task.]]>
+ </doc>
+ </field>
+ <field name="SYSLOG" type="org.apache.hadoop.mapred.TaskLog.LogName"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Log on the map-reduce system logs of the task.]]>
+ </doc>
+ </field>
+ <field name="PROFILE" type="org.apache.hadoop.mapred.TaskLog.LogName"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The java profiler information.]]>
+ </doc>
+ </field>
+ <field name="DEBUGOUT" type="org.apache.hadoop.mapred.TaskLog.LogName"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Log the debug script's stdout]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[The names of the log files kept for each task.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLog.LogName -->
+ <!-- start class org.apache.hadoop.mapred.TaskLogAppender -->
+ <class name="TaskLogAppender" extends="org.apache.log4j.FileAppender"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogAppender"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="activateOptions"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="append"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+ </method>
+ <method name="flush"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Getter/Setter methods for log4j.]]>
+ </doc>
+ </method>
+ <method name="setTaskId"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="java.lang.String"/>
+ </method>
+ <method name="getTotalLogFileSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setTotalLogFileSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logSize" type="long"/>
+ </method>
+ <doc>
+ <![CDATA[A simple log4j-appender for the task child's
+ map-reduce system logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogAppender -->
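+ <!-- A hedged sketch of attaching a TaskLogAppender programmatically; in
+      practice Hadoop configures it from log4j.properties. The attempt ID,
+      size, and layout below are made up, and the size unit is whatever the
+      appender expects.
+
+        import org.apache.log4j.Logger;
+        import org.apache.log4j.PatternLayout;
+        import org.apache.hadoop.mapred.TaskLogAppender;
+
+        TaskLogAppender appender = new TaskLogAppender();
+        appender.setTaskId("attempt_200904211745_0001_m_000000_0"); // hypothetical
+        appender.setTotalLogFileSize(100);  // cap on retained log size
+        appender.setLayout(new PatternLayout("%d{ISO8601} %p %c: %m%n"));
+        appender.activateOptions();         // resolves the log file from the task id
+        Logger.getRootLogger().addAppender(appender);
+ -->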
+ <!-- start class org.apache.hadoop.mapred.TaskLogServlet -->
+ <class name="TaskLogServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskLogServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskLogUrl" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskTrackerHostName" type="java.lang.String"/>
+ <param name="httpPort" type="java.lang.String"/>
+ <param name="taskAttemptID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Construct the taskLogUrl.
+ @param taskTrackerHostName the host name of the TaskTracker
+ @param httpPort the HTTP port of the TaskTracker
+ @param taskAttemptID the ID of the task attempt
+ @return the taskLogUrl]]>
+ </doc>
+ </method>
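+ <!-- For illustration, building a task log link the way a web UI might; the
+      host, port, and attempt ID are hypothetical (50060 is the customary
+      TaskTracker HTTP port):
+
+        import org.apache.hadoop.mapred.TaskLogServlet;
+
+        String url = TaskLogServlet.getTaskLogUrl(
+            "tracker01.example.com", "50060",
+            "attempt_200904211745_0001_m_000000_0");
+ -->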
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the logs via http.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A servlet that is run by the TaskTrackers to provide the task logs via http.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskLogServlet -->
+ <!-- start class org.apache.hadoop.mapred.TaskReport -->
+ <class name="TaskReport" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="TaskReport"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getTaskID()} instead">
+ <doc>
+ <![CDATA[@deprecated use {@link #getTaskID()} instead]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The id of the task.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The amount completed, between zero and one.]]>
+ </doc>
+ </method>
+ <method name="getState" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The most recent state, reported by a {@link Reporter}.]]>
+ </doc>
+ </method>
+ <method name="getDiagnostics" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A list of error messages.]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[A table of counters.]]>
+ </doc>
+ </method>
+ <method name="getCurrentStatus" return="org.apache.hadoop.mapred.TIPStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The current status]]>
+ </doc>
+ </method>
+ <method name="getFinishTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get finish time of task.
+ @return the finish time, or 0 if the finish time was not set.]]>
+ </doc>
+ </method>
+ <method name="getStartTime" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get start time of task.
+ @return the start time, or 0 if the start time was not set.]]>
+ </doc>
+ </method>
+ <method name="setSuccessfulAttempt"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="t" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Set the successful attempt ID of the task.]]>
+ </doc>
+ </method>
+ <method name="getSuccessfulTaskAttempt" return="org.apache.hadoop.mapred.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the attempt ID that took this task to completion]]>
+ </doc>
+ </method>
+ <method name="setRunningTaskAttempts"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="runningAttempts" type="java.util.Collection"/>
+ <doc>
+ <![CDATA[Set the running attempt(s) of the task.]]>
+ </doc>
+ </method>
+ <method name="getRunningTaskAttempts" return="java.util.Collection"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the running task attempt IDs for this task]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A report on the state of a task.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskReport -->
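+ <!-- A small sketch of consuming TaskReports from a JobClient; the job ID is
+      hypothetical and IOException handling is elided.
+
+        import org.apache.hadoop.mapred.JobClient;
+        import org.apache.hadoop.mapred.JobConf;
+        import org.apache.hadoop.mapred.JobID;
+        import org.apache.hadoop.mapred.TaskReport;
+
+        JobClient client = new JobClient(new JobConf());
+        TaskReport[] maps = client.getMapTaskReports(
+            JobID.forName("job_200904211745_0001"));  // hypothetical job
+        for (TaskReport r : maps) {
+          System.out.println(r.getTaskID() + " progress=" + r.getProgress()
+              + " state=" + r.getState());
+        }
+ -->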
+ <!-- start class org.apache.hadoop.mapred.TaskTracker -->
+ <class name="TaskTracker" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MRConstants"/>
+ <implements name="org.apache.hadoop.mapred.TaskUmbilicalProtocol"/>
+ <implements name="java.lang.Runnable"/>
+ <constructor name="TaskTracker" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Start with the local machine name and the default JobTracker.]]>
+ </doc>
+ </constructor>
+ <method name="getTaskTrackerInstrumentation" return="org.apache.hadoop.mapred.TaskTrackerInstrumentation"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getProtocolVersion" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="protocol" type="java.lang.String"/>
+ <param name="clientVersion" type="long"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getInstrumentationClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="setInstrumentationClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="t" type="java.lang.Class"/>
+ </method>
+ <method name="cleanupStorage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Removes all contents of temporary storage. Called upon
+ startup, to remove any leftovers from a previous run.]]>
+ </doc>
+ </method>
+ <method name="shutdown"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close down the TaskTracker and all its components. We must also shut down
+ any running tasks or threads, and clean up disk space. A new TaskTracker
+ within the same process space might be restarted, so everything must be
+ clean.]]>
+ </doc>
+ </method>
+ <method name="getJobClient" return="org.apache.hadoop.mapred.InterTrackerProtocol"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The connection to the JobTracker, used by the TaskRunner
+ for locating remote files.]]>
+ </doc>
+ </method>
+ <method name="getTaskTrackerReportAddress" return="java.net.InetSocketAddress"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the address to which the tasktracker is bound.]]>
+ </doc>
+ </method>
+ <method name="getJvmManagerInstance" return="org.apache.hadoop.mapred.JvmManager"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The server retry loop.
+ This while-loop attempts to connect to the JobTracker. It only
+ loops when the old TaskTracker has gone bad (its state is
+ stale somehow) and we need to reinitialize everything.]]>
+ </doc>
+ </method>
+ <method name="getTask" return="org.apache.hadoop.mapred.JvmTask"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jvmId" type="org.apache.hadoop.mapred.JVMId"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called upon startup by the child process, to fetch Task data.]]>
+ </doc>
+ </method>
+ <method name="statusUpdate" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskStatus" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called periodically to report Task progress, from 0.0 to 1.0.]]>
+ </doc>
+ </method>
+ <method name="reportDiagnosticInfo"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="info" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Called when the task dies before completion, and we want to report back
+ diagnostic info]]>
+ </doc>
+ </method>
+ <method name="reportNextRecordRange"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="range" type="org.apache.hadoop.mapred.SortedRanges.Range"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="ping" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Child checking to see if we're alive. Normally does nothing.]]>
+ </doc>
+ </method>
+ <method name="commitPending"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="taskStatus" type="org.apache.hadoop.mapred.TaskStatus"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Task is reporting that it is in commit_pending
+ and it is waiting for the commit response.]]>
+ </doc>
+ </method>
+ <method name="canCommit" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <doc>
+ <![CDATA[Child checking whether it can commit]]>
+ </doc>
+ </method>
+ <method name="done"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The task is done.]]>
+ </doc>
+ </method>
+ <method name="shuffleError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A reduce-task failed to shuffle the map-outputs. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="fsError"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="message" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A child task had a local filesystem error. Kill the task.]]>
+ </doc>
+ </method>
+ <method name="getMapCompletionEvents" return="org.apache.hadoop.mapred.MapTaskCompletionEventsUpdate"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+ <param name="fromEventId" type="int"/>
+ <param name="maxLocs" type="int"/>
+ <param name="id" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="mapOutputLost"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+ <param name="errorMsg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A completed map task's output has been lost.]]>
+ </doc>
+ </method>
+ <method name="isIdle" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is this task tracker idle?
+ @return true if this task tracker has finished and cleaned up all of its tasks.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="argv" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Start the TaskTracker, pointing toward the indicated JobTracker.]]>
+ </doc>
+ </method>
+ <method name="isTaskMemoryManagerEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Is the TaskMemoryManager enabled on this system?
+ @return true if enabled, false otherwise.]]>
+ </doc>
+ </method>
+ <method name="getTaskMemoryManager" return="org.apache.hadoop.mapred.TaskMemoryManagerThread"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="MR_CLIENTTRACE_FORMAT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ClientTraceLog" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[TaskTracker is a process that starts and tracks MR Tasks
+ in a networked environment. It contacts the JobTracker
+ for Task assignments and to report results.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker -->
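+ <!-- A hedged sketch of starting a TaskTracker in-process rather than via the
+      bin/hadoop scripts; the JobTracker address is hypothetical and
+      IOException handling is elided.
+
+        import org.apache.hadoop.mapred.JobConf;
+        import org.apache.hadoop.mapred.TaskTracker;
+
+        JobConf conf = new JobConf();
+        conf.set("mapred.job.tracker", "jt.example.com:9001"); // hypothetical
+        TaskTracker tracker = new TaskTracker(conf);
+        new Thread(tracker).start();  // run() enters the server retry loop
+ -->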
+ <!-- start class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <class name="TaskTracker.MapOutputServlet" extends="javax.servlet.http.HttpServlet"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskTracker.MapOutputServlet"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doGet"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="request" type="javax.servlet.http.HttpServletRequest"/>
+ <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+ <exception name="ServletException" type="javax.servlet.ServletException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class is used in TaskTracker's Jetty to serve the map outputs
+ to other nodes.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet -->
+ <!-- start class org.apache.hadoop.mapred.TextInputFormat -->
+ <class name="TextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.lib.input.TextInputFormat}
+ instead.">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="TextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return is used to signal end of line. Keys are
+ the position in the file, and values are the line of text.
+ @deprecated Use {@link org.apache.hadoop.mapreduce.lib.input.TextInputFormat}
+ instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextInputFormat -->
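+ <!-- Typical old-API configuration for reading line-oriented text, where keys
+      are byte offsets and values are lines; the input path is hypothetical.
+
+        import org.apache.hadoop.fs.Path;
+        import org.apache.hadoop.mapred.FileInputFormat;
+        import org.apache.hadoop.mapred.JobConf;
+        import org.apache.hadoop.mapred.TextInputFormat;
+
+        JobConf job = new JobConf();
+        job.setInputFormat(TextInputFormat.class);
+        FileInputFormat.setInputPaths(job, new Path("/data/in")); // hypothetical
+ -->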
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat -->
+ <class name="TextOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use
+ {@link org.apache.hadoop.mapreduce.lib.output.TextOutputFormat} instead.">
+ <constructor name="TextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes plain text files.
+ @deprecated Use
+ {@link org.apache.hadoop.mapreduce.lib.output.TextOutputFormat} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat -->
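+ <!-- The matching output side, continuing the sketch above; the output path
+      and key/value classes are illustrative.
+
+        import org.apache.hadoop.fs.Path;
+        import org.apache.hadoop.io.IntWritable;
+        import org.apache.hadoop.io.Text;
+        import org.apache.hadoop.mapred.FileOutputFormat;
+        import org.apache.hadoop.mapred.TextOutputFormat;
+
+        job.setOutputFormat(TextOutputFormat.class);
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(IntWritable.class);
+        FileOutputFormat.setOutputPath(job, new Path("/data/out")); // hypothetical
+ -->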
+ <!-- start class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+ <class name="TextOutputFormat.LineRecordWriter" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordWriter"/>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="out" type="java.io.DataOutputStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter -->
+ <!-- start class org.apache.hadoop.mapred.TIPStatus -->
+ <class name="TIPStatus" extends="java.lang.Enum"
+ abstract="false"
+ static="false" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.TIPStatus[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.TIPStatus"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="PENDING" type="org.apache.hadoop.mapred.TIPStatus"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNNING" type="org.apache.hadoop.mapred.TIPStatus"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMPLETE" type="org.apache.hadoop.mapred.TIPStatus"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="KILLED" type="org.apache.hadoop.mapred.TIPStatus"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="org.apache.hadoop.mapred.TIPStatus"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The states of a {@link TaskInProgress} as seen by the JobTracker.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.TIPStatus -->
+</package>
+<package name="org.apache.hadoop.mapred.jobcontrol">
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.Job -->
+ <class name="Job" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf, java.util.ArrayList"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+ @param jobConf a mapred job configuration representing a job to be executed.
+ @param dependingJobs a list of jobs the current job depends on]]>
+ </doc>
+ </constructor>
+ <constructor name="Job" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a job.
+
+ @param jobConf mapred job configuration representing a job to be executed.
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job name of this job]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job name for this job.
+ @param jobName the job name]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job ID of this job assigned by JobControl]]>
+ </doc>
+ </method>
+ <method name="setJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="id" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the job ID for this job.
+ @param id the job ID]]>
+ </doc>
+ </method>
+ <method name="getMapredJobID" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #getAssignedJobID()} instead">
+ <doc>
+ <![CDATA[@return the mapred ID of this job
+ @deprecated use {@link #getAssignedJobID()} instead]]>
+ </doc>
+ </method>
+ <method name="setMapredJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="use {@link #setAssignedJobID(JobID)} instead">
+ <param name="mapredJobID" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job.
+ @param mapredJobID the mapred job ID for this job.
+ @deprecated use {@link #setAssignedJobID(JobID)} instead]]>
+ </doc>
+ </method>
+ <method name="getAssignedJobID" return="org.apache.hadoop.mapred.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred ID of this job as assigned by the
+ mapred framework.]]>
+ </doc>
+ </method>
+ <method name="setAssignedJobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="mapredJobID" type="org.apache.hadoop.mapred.JobID"/>
+ <doc>
+ <![CDATA[Set the mapred ID for this job as assigned by the
+ mapred framework.
+ @param mapredJobID the mapred job ID for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the mapred job conf of this job]]>
+ </doc>
+ </method>
+ <method name="setJobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Set the mapred job conf for this job.
+ @param jobConf the mapred job conf for this job.]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the state of this job]]>
+ </doc>
+ </method>
+ <method name="setState"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="state" type="int"/>
+ <doc>
+ <![CDATA[Set the state for this job.
+ @param state the new state for this job.]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the message of this job]]>
+ </doc>
+ </method>
+ <method name="setMessage"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="message" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the message for this job.
+ @param message the message for this job.]]>
+ </doc>
+ </method>
+ <method name="getJobClient" return="org.apache.hadoop.mapred.JobClient"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the job client of this job]]>
+ </doc>
+ </method>
+ <method name="getDependingJobs" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the depending jobs of this job]]>
+ </doc>
+ </method>
+ <method name="addDependingJob" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dependingJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a job to this job's dependency list. Dependent jobs can only be added while a Job
+ is waiting to run, not during or afterwards.
+
+ @param dependingJob Job that this Job depends on.
+ @return <tt>true</tt> if the Job was added.]]>
+ </doc>
+ </method>
+ <method name="isCompleted" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in a complete state]]>
+ </doc>
+ </method>
+ <method name="isReady" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return true if this job is in READY state]]>
+ </doc>
+ </method>
+ <method name="submit"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Submit this job to mapred. The state becomes RUNNING if submission
+ is successful, FAILED otherwise.]]>
+ </doc>
+ </method>
+ <field name="SUCCESS" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="WAITING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNNING" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="READY" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DEPENDENT_FAILED" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class encapsulates a MapReduce job and its dependencies. It monitors
+ the states of the depending jobs and updates the state of this job.
+ A job starts in the WAITING state. If it does not have any depending jobs, or
+ all of the depending jobs are in the SUCCESS state, then the job state becomes
+ READY. If any depending job fails, the job will fail too.
+ When in the READY state, the job can be submitted to Hadoop for execution, with
+ the state changing to RUNNING. From the RUNNING state, the job can reach the
+ SUCCESS or FAILED state, depending on the status of the job execution.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.Job -->
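+ <!-- A minimal sketch of chaining two jobcontrol Jobs; confA and confB stand
+      for already-populated JobConf instances, and IOException handling is
+      elided.
+
+        import org.apache.hadoop.mapred.jobcontrol.Job;
+
+        Job extract = new Job(confA);       // hypothetical JobConf
+        Job aggregate = new Job(confB);     // hypothetical JobConf
+        aggregate.addDependingJob(extract); // aggregate waits for extract
+ -->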
+ <!-- start class org.apache.hadoop.mapred.jobcontrol.JobControl -->
+ <class name="JobControl" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Runnable"/>
+ <constructor name="JobControl" type="java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Construct a job control for a group of jobs.
+ @param groupName a name identifying this group]]>
+ </doc>
+ </constructor>
+ <method name="getWaitingJobs" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the waiting state]]>
+ </doc>
+ </method>
+ <method name="getRunningJobs" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the running state]]>
+ </doc>
+ </method>
+ <method name="getReadyJobs" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the ready state]]>
+ </doc>
+ </method>
+ <method name="getSuccessfulJobs" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the jobs in the success state]]>
+ </doc>
+ </method>
+ <method name="getFailedJobs" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="addJob" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="aJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+ <doc>
+ <![CDATA[Add a new job.
+ @param aJob the new job]]>
+ </doc>
+ </method>
+ <method name="addJobs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobs" type="java.util.Collection"/>
+ <doc>
+ <![CDATA[Add a collection of jobs.
+
+ @param jobs the jobs to add]]>
+ </doc>
+ </method>
+ <method name="getState" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the thread state]]>
+ </doc>
+ </method>
+ <method name="stop"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set the thread state to STOPPING so that the
+ thread will stop when it wakes up.]]>
+ </doc>
+ </method>
+ <method name="suspend"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Suspend the running thread.]]>
+ </doc>
+ </method>
+ <method name="resume"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Resume the suspended thread.]]>
+ </doc>
+ </method>
+ <method name="allFinished" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The main loop for the thread. The loop does the following:
+ check the states of the running jobs,
+ update the states of the waiting jobs, and
+ submit the jobs in the ready state.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class encapsulates a set of MapReduce jobs and their dependencies. It tracks
+ the states of the jobs by placing them into different tables according to their
+ states.
+
+ This class provides APIs for the client app to add a job to the group and to get
+ the jobs in the group in different states. When a
+ job is added, an ID unique to the group is assigned to the job.
+
+ This class has a thread that submits jobs when they become ready, monitors the
+ states of the running jobs, and updates the states of jobs based on the state
+ changes of the jobs they depend on. The class provides APIs for suspending,
+ resuming, and stopping the thread.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.jobcontrol.JobControl -->
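+ <!-- A sketch of driving the two jobs above with JobControl; the group name
+      and polling interval are arbitrary, and InterruptedException handling is
+      elided.
+
+        import org.apache.hadoop.mapred.jobcontrol.JobControl;
+
+        JobControl control = new JobControl("example-group"); // hypothetical name
+        control.addJob(extract);
+        control.addJob(aggregate);
+        new Thread(control).start();  // run() submits jobs as they become READY
+        while (!control.allFinished()) {
+          Thread.sleep(500);
+        }
+        control.stop();               // let the control thread exit
+ -->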
+</package>
+<package name="org.apache.hadoop.mapred.join">
+ <!-- start class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+ <class name="ArrayListBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator"/>
+ <constructor name="ArrayListBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="ArrayListBackedIterator" type="java.util.ArrayList"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. The
+ implementation uses an {@link java.util.ArrayList} to store elements
+ added to it, replaying them as requested.
+ Prefer {@link StreamBackedIterator}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <interface name="ComposableInputFormat" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat"/>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Refinement of InputFormat requiring implementors to provide
+ ComposableRecordReader instead of RecordReader.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+ <!-- start interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <interface name="ComposableRecordReader" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader"/>
+ <implements name="java.lang.Comparable"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="key" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key this RecordReader would supply on a call to next(K,V)]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RecordReader into the object provided.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns true if the stream is not empty, but provides no guarantee that
+ a call to next(K,V) will succeed.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[While key-value pairs from this RecordReader match the given key, register
+ them with the JoinCollector provided.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Additional operations required of a RecordReader to participate in a join.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputFormat -->
+ <class name="CompositeInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat"/>
+ <constructor name="CompositeInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setFormat"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Interpret a given string as a composite expression.
+ {@code
+ func ::= <ident>([<func>,]*<func>)
+ func ::= tbl(<class>,"<path>")
+ class ::= @see java.lang.Class#forName(java.lang.String)
+ path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
+ }
+ Reads the expression from the <tt>mapred.join.expr</tt> property and
+ user-supplied join types from <tt>mapred.join.define.&lt;ident&gt;</tt>
+ properties. Paths supplied to <tt>tbl</tt> are given as input paths to the
+ InputFormat class listed.
+ @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]>
+ </doc>
+ </method>
+ <method name="addDefaults"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Adds the default set of identifiers to the parser.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Build a CompositeInputSplit from the child InputFormats by assigning the
+ ith split from each child to the ith composite split.]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Construct a CompositeRecordReader for the children of this InputFormat
+ as defined in the init expression.
+ The outermost join need only be composable, not necessarily a composite.
+ Mandating TupleWritable isn't strictly correct.]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="java.lang.Class"/>
+ <param name="path" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given an InputFormat class (inf) and a path (p), return:
+ {@code tbl(<inf>, <p>) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class"/>
+ <param name="path" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given an operation (op), an InputFormat class (inf), and a set of paths (p), return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <method name="compose" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="op" type="java.lang.String"/>
+ <param name="inf" type="java.lang.Class"/>
+ <param name="path" type="org.apache.hadoop.fs.Path[]"/>
+ <doc>
+ <![CDATA[Convenience method for constructing composite formats.
+ Given an operation (op), an InputFormat class (inf), and a set of paths (p), return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat capable of performing joins over a set of data sources sorted
+ and partitioned the same way.
+ @see #setFormat
+
+ A user may define new join types by setting the property
+ <tt>mapred.join.define.&lt;ident&gt;</tt> to a classname. In the expression
+ <tt>mapred.join.expr</tt>, the identifier will be assumed to be a
+ ComposableRecordReader.
+ <tt>mapred.join.keycomparator</tt> can be a classname used to compare keys
+ in the join.
+ @see JoinRecordReader
+ @see MultiFilterRecordReader]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputFormat -->
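+ <!-- A sketch of an inner join over two sources that are sorted and
+      partitioned identically; the paths are hypothetical and
+      SequenceFileInputFormat is just one plausible child format.
+
+        import org.apache.hadoop.fs.Path;
+        import org.apache.hadoop.mapred.JobConf;
+        import org.apache.hadoop.mapred.SequenceFileInputFormat;
+        import org.apache.hadoop.mapred.join.CompositeInputFormat;
+
+        JobConf job = new JobConf();
+        job.setInputFormat(CompositeInputFormat.class);
+        job.set("mapred.join.expr", CompositeInputFormat.compose(
+            "inner", SequenceFileInputFormat.class,
+            new Path("/data/a"), new Path("/data/b"))); // hypothetical paths
+ -->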
+ <!-- start class org.apache.hadoop.mapred.join.CompositeInputSplit -->
+ <class name="CompositeInputSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="CompositeInputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CompositeInputSplit" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="s" type="org.apache.hadoop.mapred.InputSplit"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an InputSplit to this collection.
+ @throws IOException If capacity was not specified during construction
+ or if capacity has been reached.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.mapred.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the aggregate length of all child InputSplits currently added.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the length of ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Collect a set of hosts from all child InputSplits.]]>
+ </doc>
+ </method>
+ <method name="getLocation" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Get the locations of the ith child InputSplit.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write splits in the following format.
+ {@code
+ <count><class1><class2>...<classn><split1><split2>...<splitn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}
+ @throws IOException If the child InputSplit cannot be read, typically
+      for failing access checks.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This InputSplit contains a set of child InputSplits. Any InputSplit inserted
+ into this collection must have a public default constructor.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeInputSplit -->
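+      <!-- A small round-trip sketch of the serialized form described above
+           (<count><class1>...<classn><split1>...<splitn>), using FileSplit
+           children; the paths are hypothetical and the enclosing method is
+           assumed to throw IOException.
+
+           CompositeInputSplit split = new CompositeInputSplit(2);  // capacity 2
+           split.add(new FileSplit(new Path("/data/a/part-00000"), 0L, 1024L,
+               new String[0]));
+           split.add(new FileSplit(new Path("/data/b/part-00000"), 0L, 2048L,
+               new String[0]));
+           DataOutputBuffer out = new DataOutputBuffer();
+           split.write(out);
+           DataInputBuffer in = new DataInputBuffer();
+           in.reset(out.getData(), out.getLength());
+           CompositeInputSplit copy = new CompositeInputSplit();
+           copy.readFields(in);  // restores both children via their
+                                 // public default constructors
+      -->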
+ <!-- start class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <class name="CompositeRecordReader" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="CompositeRecordReader" type="int, int, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a RecordReader with <tt>capacity</tt> children to position
+ <tt>id</tt> in the parent reader.
+ The id of a root CompositeRecordReader is -1 by convention, but relying
+ on this is not recommended.]]>
+ </doc>
+ </constructor>
+ <method name="combine" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ </method>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the position in the collector this class occupies.]]>
+ </doc>
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordReaderQueue" return="java.util.PriorityQueue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return sorted list of RecordReaders for this composite.]]>
+ </doc>
+ </method>
+ <method name="getComparator" return="org.apache.hadoop.io.WritableComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return comparator defining the ordering for RecordReaders in this
+ composite.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rr" type="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a RecordReader to this collection.
+ The id() of a RecordReader determines where in the Tuple its
+ entry will appear. Adding RecordReaders with the same id has
+ undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="key" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key for the current join or the value at the top of the
+ RecordReader heap.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the top of this RR into the given object.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return true if it is possible that this could emit more values.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Pass skip key to child RRs.]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Obtain an iterator over the child RRs apropos of the value type
+ ultimately emitted from this join.]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[If the key provided matches that of this Composite, give the
+      JoinCollector an iterator over the values it may emit.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For all child RRs offering the key provided, obtain an iterator
+ at that position in the JoinCollector.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key of join or head of heap
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new key value common to all child RRs.
+ @throws ClassCastException if key classes differ.]]>
+ </doc>
+ </method>
+ <method name="createInternalValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a value to be used internally for joins.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Unsupported (returns zero in all cases).]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close all child RRs.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Report progress as the minimum of all child RR progress.]]>
+ </doc>
+ </method>
+ <field name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="kids" type="org.apache.hadoop.mapred.join.ComposableRecordReader[]"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A RecordReader that can effect joins of RecordReaders sharing a common key
+ type and partitioning.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <class name="InnerJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Return true iff the tuple is full (all data sources contain this key).]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full inner join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <class name="JoinRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+ <constructor name="JoinRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Emit the next set of key, value pairs as defined by the child
+ RecordReaders and operation associated with this composite RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator wrapping the JoinCollector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Base class for Composite joins returning Tuples of arbitrary Writables.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <class name="JoinRecordReader.JoinDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator"/>
+ <constructor name="JoinRecordReader.JoinDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Since the JoinCollector is effecting our operation, we need only
+ provide an iterator proxy wrapping its operation.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+ <class name="MultiFilterRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+ <constructor name="MultiFilterRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="emit" return="org.apache.hadoop.io.Writable"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For each tuple emitted, return a value (typically one of the values
+ in the tuple).
+ Modifying the Writables in the tuple is permitted and unlikely to affect
+ join behavior in most cases, but it is not recommended. It's safer to
+ clone first.]]>
+ </doc>
+ </method>
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+      <![CDATA[Default implementation offers every Tuple from the collector
+      (the outer join of child RRs) to {@link #emit}.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator returning a single value from the tuple.
+ @see MultiFilterDelegationIterator]]>
+ </doc>
+ </method>
+ <doc>
+      <![CDATA[Base class for Composite joins returning values derived from multiple
+ sources, but generally not tuples.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <class name="MultiFilterRecordReader.MultiFilterDelegationIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator"/>
+ <constructor name="MultiFilterRecordReader.MultiFilterDelegationIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy the JoinCollector, but include callback to emit.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <class name="OuterJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="combine" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="srcs" type="java.lang.Object[]"/>
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit everything from the collector.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Full outer join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.join.OverrideRecordReader -->
+ <class name="OverrideRecordReader" extends="org.apache.hadoop.mapred.join.MultiFilterRecordReader"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="emit" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+ <doc>
+ <![CDATA[Emit the value with the highest position in the tuple.]]>
+ </doc>
+ </method>
+ <method name="fillJoinCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="iterkey" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Instead of filling the JoinCollector with iterators from all
+ data sources, fill only the rightmost for this key.
+ This not only saves space by discarding the other sources, but
+ it also emits the number of key-value pairs in the preferred
+ RecordReader instead of repeating that stream n times, where
+ n is the cardinality of the cross product of the discarded
+ streams for the given key.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Prefer the &quot;rightmost&quot; data source for this key.
+ For example, <tt>override(S1,S2,S3)</tt> will prefer values
+ from S3 over S2, and values from S2 over S1 for all keys
+ emitted from all sources.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.OverrideRecordReader -->
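+      <!-- A hedged sketch of the override semantics above: with this expression,
+           a key present in both sources yields only the value from /data/delta,
+           the rightmost source. The job and paths are hypothetical.
+
+           JobConf job = new JobConf(MyDriver.class);  // MyDriver: hypothetical
+           job.setInputFormat(CompositeInputFormat.class);
+           job.set("mapred.join.expr", CompositeInputFormat.compose(
+               "override", SequenceFileInputFormat.class,
+               new Path[] { new Path("/data/base"), new Path("/data/delta") }));
+      -->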
+ <!-- start class org.apache.hadoop.mapred.join.Parser -->
+ <class name="Parser" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <doc>
+ <![CDATA[Very simple shift-reduce parser for join expressions.
+
+ This should be sufficient for the user extension permitted now, but ought to
+ be replaced with a parser generator if more complex grammars are supported.
+ In particular, this &quot;shift-reduce&quot; parser has no states. Each set
+ of formals requires a different internal node type, which is responsible for
+ interpreting the list of tokens it receives. This is sufficient for the
+ current grammar, but it has several annoying properties that might inhibit
+      extension. In particular, parentheses are always function calls; an
+ algebraic or filter grammar would not only require a node type, but must
+ also work around the internals of this parser.
+
+      For most other cases, adding classes to the hierarchy, particularly by
+      extending JoinRecordReader and MultiFilterRecordReader, is fairly
+ straightforward. One need only override the relevant method(s) (usually only
+ {@link CompositeRecordReader#combine}) and include a property to map its
+ value to an identifier in the parser.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser -->
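+      <!-- A sketch of the extension path described above: subclass
+           JoinRecordReader, override combine, and map the class to a parser
+           identifier via the mapred.join.define.<ident> property. The class
+           name and the emission rule are hypothetical; the constructor keeps
+           the (int, JobConf, int, Class) formals shown for JoinRecordReader.
+
+           public class AnyTwoJoinReader extends JoinRecordReader {
+             public AnyTwoJoinReader(int id, JobConf conf, int capacity,
+                 Class cmpcl) throws IOException {
+               super(id, conf, capacity, cmpcl);
+             }
+             protected boolean combine(Object[] srcs, TupleWritable dst) {
+               int present = 0;
+               for (int i = 0; i < dst.size(); ++i) {
+                 if (dst.has(i)) { ++present; }
+               }
+               return present >= 2;  // emit keys present in two or more sources
+             }
+           }
+           // Register, then use "anytwo(...)" inside mapred.join.expr:
+           job.set("mapred.join.define.anytwo", AnyTwoJoinReader.class.getName());
+      -->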
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Node -->
+ <class name="Parser.Node" extends="java.lang.Object"
+ abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat"/>
+ <constructor name="Parser.Node" type="java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addIdentifier"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="ident" type="java.lang.String"/>
+ <param name="mcstrSig" type="java.lang.Class[]"/>
+ <param name="nodetype" type="java.lang.Class"/>
+ <param name="cl" type="java.lang.Class"/>
+ <exception name="NoSuchMethodException" type="java.lang.NoSuchMethodException"/>
+ <doc>
+ <![CDATA[For a given identifier, add a mapping to the nodetype for the parse
+ tree and to the ComposableRecordReader to be created, including the
+ formals required to invoke the constructor.
+ The nodetype and constructor signature should be filled in from the
+ child node.]]>
+ </doc>
+ </method>
+ <method name="setID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="id" type="int"/>
+ </method>
+ <method name="setKeyComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="cmpcl" type="java.lang.Class"/>
+ </method>
+ <field name="rrCstrMap" type="java.util.Map"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="ident" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="cmpcl" type="java.lang.Class"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Node -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <class name="Parser.NodeToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <class name="Parser.NumToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.NumToken" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.NumToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <class name="Parser.StrToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Parser.StrToken" type="org.apache.hadoop.mapred.join.Parser.TType, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.StrToken -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.Token -->
+ <class name="Parser.Token" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getType" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getNum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getStr" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Tagged-union type for tokens from the join expression.
+ @see Parser.TType]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.Token -->
+ <!-- start class org.apache.hadoop.mapred.join.Parser.TType -->
+ <class name="Parser.TType" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapred.join.Parser.TType[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapred.join.Parser.TType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="CIF" type="org.apache.hadoop.mapred.join.Parser.TType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="IDENT" type="org.apache.hadoop.mapred.join.Parser.TType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMMA" type="org.apache.hadoop.mapred.join.Parser.TType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LPAREN" type="org.apache.hadoop.mapred.join.Parser.TType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RPAREN" type="org.apache.hadoop.mapred.join.Parser.TType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="QUOT" type="org.apache.hadoop.mapred.join.Parser.TType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="NUM" type="org.apache.hadoop.mapred.join.Parser.TType"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.Parser.TType -->
+ <!-- start interface org.apache.hadoop.mapred.join.ResetableIterator -->
+ <interface name="ResetableIterator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[True if a call to next may return a value. False positives are
+      permitted, but not false negatives.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Assign the next value to the object provided.
+ It is required that elements added to a ResetableIterator be returned in
+ the same order after a call to {@link #reset} (FIFO).
+
+ Note that a call to this may fail for nested joins (i.e. more elements
+ available, but none satisfying the constraints of the join)]]>
+ </doc>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Assign the last value returned to the object provided.]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Set iterator to return to the start of its range. Must be called after
+ calling {@link #add} to avoid a ConcurrentModificationException.]]>
+ </doc>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an element to the collection of elements to iterate over.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close datasources and release resources. Calling methods on the iterator
+ after calling close has undefined behavior.]]>
+ </doc>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Close datasources, but do not release internal resources. Calling this
+ method should permit the object to be reused with a different datasource.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This defines an interface to a stateful Iterator that can replay elements
+ added to it directly.
+ Note that this does not extend {@link java.util.Iterator}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.join.ResetableIterator -->
+ <!-- start class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <class name="ResetableIterator.EMPTY" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator"/>
+ <constructor name="ResetableIterator.EMPTY"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY -->
+ <!-- start class org.apache.hadoop.mapred.join.StreamBackedIterator -->
+ <class name="StreamBackedIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ResetableIterator"/>
+ <constructor name="StreamBackedIterator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="replay" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="add"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="item" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="clear"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[This class provides an implementation of ResetableIterator. This
+ implementation uses a byte array to store elements added to it.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.StreamBackedIterator -->
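+      <!-- A minimal sketch of the ResetableIterator contract using this
+           byte-array-backed implementation: elements come back in FIFO order
+           after reset(), which must be called once add() is done. Assumes the
+           enclosing method throws IOException.
+
+           StreamBackedIterator<Text> it = new StreamBackedIterator<Text>();
+           it.add(new Text("a"));
+           it.add(new Text("b"));
+           it.reset();
+           Text val = new Text();
+           while (it.hasNext() && it.next(val)) {  // hasNext may report a false
+             System.out.println(val);              // positive; next's boolean is
+           }                                       // authoritative. Prints a, b.
+           it.close();
+      -->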
+ <!-- start class org.apache.hadoop.mapred.join.TupleWritable -->
+ <class name="TupleWritable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable"/>
+ <constructor name="TupleWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create an empty tuple with no allocated storage for writables.]]>
+ </doc>
+ </constructor>
+ <constructor name="TupleWritable" type="org.apache.hadoop.io.Writable[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Initialize the tuple with the given storage; it is unknown whether
+      any of the writables contain &quot;written&quot; values.]]>
+ </doc>
+ </constructor>
+ <method name="has" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Return true if tuple has an element at the position provided.]]>
+ </doc>
+ </method>
+ <method name="get" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Get ith Writable from Tuple.]]>
+ </doc>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of children in this Tuple.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return an iterator over the elements in this tuple.
+ Note that this doesn't flatten the tuple; one may receive tuples
+ from this iterator.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convert Tuple to String as in the following.
+ <tt>[<child1>,<child2>,...,<childn>]</tt>]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes each Writable to <code>out</code>.
+ TupleWritable format:
+ {@code
+ <count><type1><type2>...<typen><obj1><obj2>...<objn>
+ }]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.
+
+ This is *not* a general-purpose tuple type. In almost all cases, users are
+ encouraged to implement their own serializable types, which can perform
+      better validation and provide more efficient encodings than this class is
+      capable of. TupleWritable relies on the join framework for type safety and
+      assumes its instances will rarely be persisted, assumptions that do not
+      hold in the general case.
+
+ @see org.apache.hadoop.io.Writable]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.TupleWritable -->
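+      <!-- A hedged sketch of consuming join values with the accessors above:
+           the value type of an inner/outer composite join is TupleWritable,
+           where position i holds the record from the ith source and may be
+           empty for outer joins. Text keys and the pass-through handling are
+           illustrative assumptions.
+
+           public class JoinMapper extends MapReduceBase
+               implements Mapper<Text, TupleWritable, Text, Text> {
+             public void map(Text key, TupleWritable value,
+                 OutputCollector<Text, Text> out, Reporter reporter)
+                 throws IOException {
+               for (int i = 0; i < value.size(); ++i) {
+                 if (value.has(i)) {  // position empty when source lacks key
+                   out.collect(key, new Text(value.get(i).toString()));
+                 }
+               }
+             }
+           }
+      -->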
+ <!-- start class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+ <class name="WrappedRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+ <method name="id" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="key" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the key at the head of this RR.]]>
+ </doc>
+ </method>
+ <method name="key"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="qkey" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Clone the key at the head of this RR into the object supplied.]]>
+ </doc>
+ </method>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+      <![CDATA[Return true if the RR, including the k,v pair stored in this
+      object, is not exhausted.]]>
+ </doc>
+ </method>
+ <method name="skip"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+      <![CDATA[Read the next k,v pair into the head of this object; return true iff
+      this object is not exhausted (a pair is stored at the head).]]>
+ </doc>
+ </method>
+ <method name="accept"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add an iterator to the collector at the position occupied by this
+ RecordReader over the values in this stream paired with the key
+      provided (i.e. register a stream of values from this source matching K
+ with a collector).]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write key-value pair at the head of this stream to the objects provided;
+ get next key-value pair from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.WritableComparable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new key from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="createValue" return="org.apache.hadoop.io.Writable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Request new value from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request progress from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Request position from proxied RR.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Forward close request to proxied RR.]]>
+ </doc>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+ <doc>
+ <![CDATA[Implement Comparable contract (compare key at head of proxied RR
+ with that of another).]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="java.lang.Object"/>
+ <doc>
+      <![CDATA[Return true iff compareTo(other) returns 0.]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[Proxy class for a RecordReader participating in the join framework.
+ This class keeps track of the &quot;head&quot; key-value pair for the
+ provided RecordReader and keeps a store of values matching a key when
+ this source is participating in a join.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+</package>
+<package name="org.apache.hadoop.mapred.lib">
+ <!-- start class org.apache.hadoop.mapred.lib.ChainMapper -->
+ <class name="ChainMapper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper"/>
+ <constructor name="ChainMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="addMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="klass" type="java.lang.Class"/>
+ <param name="inputKeyClass" type="java.lang.Class"/>
+ <param name="inputValueClass" type="java.lang.Class"/>
+ <param name="outputKeyClass" type="java.lang.Class"/>
+ <param name="outputValueClass" type="java.lang.Class"/>
+ <param name="byValue" type="boolean"/>
+ <param name="mapperConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Adds a Mapper class to the chain job's JobConf.
+ <p/>
+      It must be specified how key and values are passed from one element of
+      the chain to the next: by value or by reference. If a Mapper relies on the
+      assumed semantics that the key and values are not modified by the
+      collector, 'by value' must be used. If the Mapper does not depend on these
+      semantics, 'by reference' can be used as an optimization to avoid
+      serialization and deserialization.
+ <p/>
+      For the added Mapper, the configuration given for it,
+      <code>mapperConf</code>, has precedence over the job's JobConf. This
+      precedence is in effect when the task is running.
+ <p/>
+      IMPORTANT: There is no need to specify the output key/value classes for the
+      ChainMapper; this is done by addMapper for the last mapper in the chain.
+ <p/>
+
+ @param job job's JobConf to add the Mapper class.
+ @param klass the Mapper class to add.
+ @param inputKeyClass mapper input key class.
+ @param inputValueClass mapper input value class.
+ @param outputKeyClass mapper output key class.
+ @param outputValueClass mapper output value class.
+ @param byValue indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param mapperConf a JobConf with the configuration for the Mapper
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configures the ChainMapper and all the Mappers in the chain.
+ <p/>
+      If this method is overridden, <code>super.configure(...)</code> should be
+      invoked at the beginning of the overriding method.]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Chains the <code>map(...)</code> methods of the Mappers in the chain.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the ChainMapper and all the Mappers in the chain.
+ <p/>
+ If this method is overridden, <code>super.close()</code> should be
+ invoked at the end of the overriding method.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The ChainMapper class allows the use of multiple Mapper classes within a
+ single Map task.
+ <p/>
+ The Mapper classes are invoked in a chained (or piped) fashion: the output of
+ the first becomes the input of the second, and so on until the last Mapper;
+ the output of the last Mapper is written to the task's output.
+ <p/>
+ The key functionality of this feature is that the Mappers in the chain do not
+ need to be aware that they are executed in a chain. This enables having
+ reusable specialized Mappers that can be combined to perform composite
+ operations within a single task.
+ <p/>
+ Special care has to be taken when creating chains: the key/values output
+ by a Mapper must be valid for the following Mapper in the chain. It is assumed
+ all Mappers and the Reducer in the chain use matching output and input key and
+ value classes, as no conversion is done by the chaining code.
+ <p/>
+ Using the ChainMapper and the ChainReducer classes it is possible to compose
+ Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
+ immediate benefit of this pattern is a dramatic reduction in disk IO.
+ <p/>
+ IMPORTANT: there is no need to specify the output key/value classes for the
+ ChainMapper; they are set by the addMapper call for the last mapper in the
+ chain.
+ <p/>
+ ChainMapper usage pattern:
+ <p/>
+ <pre>
+ ...
+ conf.setJobName("chain");
+ conf.setInputFormat(TextInputFormat.class);
+ conf.setOutputFormat(TextOutputFormat.class);
+ 
+ JobConf mapAConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
+   Text.class, Text.class, true, mapAConf);
+ 
+ JobConf mapBConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
+   LongWritable.class, Text.class, false, mapBConf);
+ 
+ JobConf reduceConf = new JobConf(false);
+ ...
+ ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
+   Text.class, Text.class, true, reduceConf);
+ 
+ ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
+   LongWritable.class, Text.class, false, null);
+ 
+ ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
+   LongWritable.class, LongWritable.class, true, null);
+ 
+ FileInputFormat.setInputPaths(conf, inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+ ...
+ 
+ JobClient jc = new JobClient(conf);
+ RunningJob job = jc.submitJob(conf);
+ ...
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.ChainMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.ChainReducer -->
+ <class name="ChainReducer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Reducer"/>
+ <constructor name="ChainReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.]]>
+ </doc>
+ </constructor>
+ <method name="setReducer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="klass" type="java.lang.Class"/>
+ <param name="inputKeyClass" type="java.lang.Class"/>
+ <param name="inputValueClass" type="java.lang.Class"/>
+ <param name="outputKeyClass" type="java.lang.Class"/>
+ <param name="outputValueClass" type="java.lang.Class"/>
+ <param name="byValue" type="boolean"/>
+ <param name="reducerConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Sets the Reducer class to the chain job's JobConf.
+ <p/>
+ It must be specified how key/value pairs are passed from one element of the
+ chain to the next: by value or by reference. If a Reducer relies on the
+ assumption that the keys and values passed to the collector are not
+ modified, 'by value' must be used. If the Reducer does not rely on this,
+ 'by reference' can be used as an optimization that avoids serialization
+ and deserialization.
+ <p/>
+ For the added Reducer, the configuration given for it,
+ <code>reducerConf</code>, takes precedence over the job's JobConf. This
+ precedence is in effect when the task is running.
+ <p/>
+ IMPORTANT: there is no need to specify the output key/value classes for the
+ ChainReducer; they are set by the setReducer or the addMapper call for the
+ last element in the chain.
+
+ @param job job's JobConf to add the Reducer class.
+ @param klass the Reducer class to add.
+ @param inputKeyClass reducer input key class.
+ @param inputValueClass reducer input value class.
+ @param outputKeyClass reducer output key class.
+ @param outputValueClass reducer output value class.
+ @param byValue indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param reducerConf a JobConf with the configuration for the Reducer
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+ </doc>
+ </method>
+ <method name="addMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="klass" type="java.lang.Class"/>
+ <param name="inputKeyClass" type="java.lang.Class"/>
+ <param name="inputValueClass" type="java.lang.Class"/>
+ <param name="outputKeyClass" type="java.lang.Class"/>
+ <param name="outputValueClass" type="java.lang.Class"/>
+ <param name="byValue" type="boolean"/>
+ <param name="mapperConf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Adds a Mapper class to the chain job's JobConf.
+ <p/>
+ It must be specified how key/value pairs are passed from one element of the
+ chain to the next: by value or by reference. If a Mapper relies on the
+ assumption that the keys and values passed to the collector are not
+ modified, 'by value' must be used. If the Mapper does not rely on this,
+ 'by reference' can be used as an optimization that avoids serialization
+ and deserialization.
+ <p/>
+ For the added Mapper, the configuration given for it,
+ <code>mapperConf</code>, takes precedence over the job's JobConf. This
+ precedence is in effect when the task is running.
+ <p/>
+ IMPORTANT: there is no need to specify the output key/value classes for the
+ ChainMapper, as this is done by the addMapper call for the last mapper in
+ the chain.
+
+ @param job chain job's JobConf to add the Mapper class.
+ @param klass the Mapper class to add.
+ @param inputKeyClass mapper input key class.
+ @param inputValueClass mapper input value class.
+ @param outputKeyClass mapper output key class.
+ @param outputValueClass mapper output value class.
+ @param byValue indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param mapperConf a JobConf with the configuration for the Mapper
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configures the ChainReducer, the Reducer and all the Mappers in the chain.
+ <p/>
+ If this method is overridden, <code>super.configure(...)</code> should be
+ invoked at the beginning of the overriding method.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="values" type="java.util.Iterator"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Chains the <code>reduce(...)</code> method of the Reducer with the
+ <code>map(...) </code> methods of the Mappers in the chain.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes the ChainReducer, the Reducer and all the Mappers in the chain.
+ <p/>
+ If this method is overridden, <code>super.close()</code> should be
+ invoked at the end of the overriding method.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The ChainReducer class allows chaining multiple Mapper classes after a
+ Reducer within the Reducer task.
+ <p/>
+ For each record output by the Reducer, the Mapper classes are invoked in a
+ chained (or piped) fashion: the output of the first becomes the input of the
+ second, and so on until the last Mapper; the output of the last Mapper is
+ written to the task's output.
+ <p/>
+ The key functionality of this feature is that the Mappers in the chain do not
+ need to be aware that they are executed after the Reducer or in a chain.
+ This enables having reusable specialized Mappers that can be combined to
+ perform composite operations within a single task.
+ <p/>
+ Special care has to be taken when creating chains: the key/values output
+ by a Mapper must be valid for the following Mapper in the chain. It is assumed
+ all Mappers and the Reducer in the chain use matching output and input key and
+ value classes, as no conversion is done by the chaining code.
+ <p/>
+ Using the ChainMapper and the ChainReducer classes it is possible to compose
+ Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
+ immediate benefit of this pattern is a dramatic reduction in disk IO.
+ <p/>
+ IMPORTANT: there is no need to specify the output key/value classes for the
+ ChainReducer; they are set by the setReducer or the addMapper call for the
+ last element in the chain.
+ <p/>
+ ChainReducer usage pattern:
+ <p/>
+ <pre>
+ ...
+ conf.setJobName("chain");
+ conf.setInputFormat(TextInputFormat.class);
+ conf.setOutputFormat(TextOutputFormat.class);
+ 
+ JobConf mapAConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
+   Text.class, Text.class, true, mapAConf);
+ 
+ JobConf mapBConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
+   LongWritable.class, Text.class, false, mapBConf);
+ 
+ JobConf reduceConf = new JobConf(false);
+ ...
+ ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
+   Text.class, Text.class, true, reduceConf);
+ 
+ ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
+   LongWritable.class, Text.class, false, null);
+ 
+ ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
+   LongWritable.class, LongWritable.class, true, null);
+ 
+ FileInputFormat.setInputPaths(conf, inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+ ...
+ 
+ JobClient jc = new JobClient(conf);
+ RunningJob job = jc.submitJob(conf);
+ ...
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.ChainReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.CombineFileInputFormat -->
+ <class name="CombineFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="CombineFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor]]>
+ </doc>
+ </constructor>
+ <method name="setMaxSplitSize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="maxSplitSize" type="long"/>
+ <doc>
+ <![CDATA[Specify the maximum size (in bytes) of each split. Each split is
+ approximately equal to the specified size.]]>
+ </doc>
+ </method>
+ <method name="setMinSplitSizeNode"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="minSplitSizeNode" type="long"/>
+ <doc>
+ <![CDATA[Specify the minimum size (in bytes) of each split per node.
+ This applies to data that is left over after combining data on a single
+ node into splits that are of maximum size specified by maxSplitSize.
+ This leftover data will be combined into its own split if its size
+ exceeds minSplitSizeNode.]]>
+ </doc>
+ </method>
+ <method name="setMinSplitSizeRack"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="minSplitSizeRack" type="long"/>
+ <doc>
+ <![CDATA[Specify the minimum size (in bytes) of each split per rack.
+ This applies to data that is left over after combining data on a single
+ rack into splits that are of maximum size specified by maxSplitSize.
+ This leftover data will be combined into its own split if its size
+ exceeds minSplitSizeRack.]]>
+ </doc>
+ </method>
+ <method name="createPool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="filters" type="java.util.List"/>
+ <doc>
+ <![CDATA[Create a new pool and add the filters to it.
+ A split cannot have files from different pools.]]>
+ </doc>
+ </method>
+ <method name="createPool"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="filters" type="org.apache.hadoop.fs.PathFilter[]"/>
+ <doc>
+ <![CDATA[Create a new pool and add the filters to it.
+ A pathname can satisfy any one of the specified filters.
+ A split cannot have files from different pools.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[This is not implemented yet.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An abstract {@link org.apache.hadoop.mapred.InputFormat} that returns {@link CombineFileSplit}'s
+ in {@link org.apache.hadoop.mapred.InputFormat#getSplits(JobConf, int)} method.
+ Splits are constructed from the files under the input paths.
+ A split cannot have files from different pools.
+ Each split returned may contain blocks from different files.
+ If a maxSplitSize is specified, then blocks on the same node are
+ combined to form a single split. Blocks that are left over are
+ then combined with other blocks in the same rack.
+ If maxSplitSize is not specified, then blocks from the same rack
+ are combined in a single split; no attempt is made to create
+ node-local splits.
+ If the maxSplitSize is equal to the block size, then this class
+ is similar to the default splitting behaviour in Hadoop: each
+ block is a locally processed split.
+ Subclasses implement {@link org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit, JobConf, Reporter)}
+ to construct <code>RecordReader</code>'s for <code>CombineFileSplit</code>'s.
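+ <p/>
+ A minimal subclass sketch (the class names <code>MyCombineFormat</code> and
+ <code>MyRecordReader</code> are hypothetical):
+ <pre>
+ public class MyCombineFormat extends CombineFileInputFormat {
+   public MyCombineFormat() {
+     // combine blocks into splits of up to 128 MB each
+     setMaxSplitSize(134217728);
+   }
+   public RecordReader getRecordReader(InputSplit split, JobConf job,
+       Reporter reporter) throws IOException {
+     // hand each chunk of the combined split to MyRecordReader
+     return new CombineFileRecordReader(job, (CombineFileSplit) split,
+         reporter, MyRecordReader.class);
+   }
+ }
+ </pre>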
+ @see CombineFileSplit]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.CombineFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.CombineFileRecordReader -->
+ <class name="CombineFileRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader"/>
+ <constructor name="CombineFileRecordReader" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.mapred.lib.CombineFileSplit, org.apache.hadoop.mapred.Reporter, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[A generic RecordReader that can hand out different recordReaders
+ for each chunk in the CombineFileSplit.]]>
+ </doc>
+ </constructor>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createKey" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="createValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the amount of data processed.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return progress based on the amount of data processed so far.]]>
+ </doc>
+ </method>
+ <method name="initNextRecordReader" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the record reader for the next chunk in this CombineFileSplit.]]>
+ </doc>
+ </method>
+ <field name="split" type="org.apache.hadoop.mapred.lib.CombineFileSplit"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="jc" type="org.apache.hadoop.mapred.JobConf"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="reporter" type="org.apache.hadoop.mapred.Reporter"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="rrClass" type="java.lang.Class"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="rrConstructor" type="java.lang.reflect.Constructor"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="idx" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="progress" type="long"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="curReader" type="org.apache.hadoop.mapred.RecordReader"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A generic RecordReader that can hand out different recordReaders
+ for each chunk in a {@link CombineFileSplit}.
+ A CombineFileSplit can combine data chunks from multiple files.
+ This class allows using different RecordReaders for processing
+ these data chunks from different files.
+ @see CombineFileSplit]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.CombineFileRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.lib.CombineFileSplit -->
+ <class name="CombineFileSplit" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="CombineFileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="CombineFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[], long[], java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CombineFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CombineFileSplit" type="org.apache.hadoop.mapred.lib.CombineFileSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Copy constructor]]>
+ </doc>
+ </constructor>
+ <method name="getJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getStartOffsets" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array containing the start offsets of the files in the split]]>
+ </doc>
+ </method>
+ <method name="getLengths" return="long[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an array containing the lengths of the files in the split]]>
+ </doc>
+ </method>
+ <method name="getOffset" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the start offset of the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the length of the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getNumPaths" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="i" type="int"/>
+ <doc>
+ <![CDATA[Returns the i<sup>th</sup> Path]]>
+ </doc>
+ </method>
+ <method name="getPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns all the Paths in the split]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Returns all the Paths where this input-split resides]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A sub-collection of input files. Unlike {@link org.apache.hadoop.mapred.FileSplit},
+ the CombineFileSplit class does not represent a split of a file, but a split of input
+ files into smaller sets. A split may contain blocks from different files, but all
+ the blocks in the same split are probably local to some rack. <br>
+ CombineFileSplit can be used to implement {@link org.apache.hadoop.mapred.RecordReader}'s,
+ with reading one record per file.
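+ <p/>
+ A usage sketch iterating the chunks of a split with the accessors declared
+ above:
+ <pre>
+ for (int i = 0; i &lt; split.getNumPaths(); i++) {
+   Path path = split.getPath(i);
+   long offset = split.getOffset(i);
+   long length = split.getLength(i);
+   // process the range [offset, offset + length) of path
+ }
+ </pre>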
+ @see org.apache.hadoop.mapred.FileSplit
+ @see CombineFileInputFormat]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.CombineFileSplit -->
+ <!-- start class org.apache.hadoop.mapred.lib.DelegatingInputFormat -->
+ <class name="DelegatingInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat"/>
+ <constructor name="DelegatingInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} that delegates the handling of each input path to the
+ {@link InputFormat} registered for that path.
+
+ @see MultipleInputs#addInputPath(JobConf, Path, Class, Class)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.DelegatingInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.DelegatingMapper -->
+ <class name="DelegatingMapper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper"/>
+ <constructor name="DelegatingMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="outputCollector" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that delegates the handling of each input path to the
+ {@link Mapper} registered for that path.
+
+ @see MultipleInputs#addInputPath(JobConf, Path, Class, Class)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.DelegatingMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
+ <class name="FieldSelectionMapReduce" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper"/>
+ <implements name="org.apache.hadoop.mapred.Reducer"/>
+ <constructor name="FieldSelectionMapReduce"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The identity function. Input key/value pair is written directly to output.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements a mapper/reducer class that can be used to perform
+ field selections in a manner similar to Unix cut. The input data is treated
+ as fields separated by a user-specified separator (the default value is
+ "\t"). The user can specify a list of fields that form the map output keys,
+ and a list of fields that form the map output values. If the input format is
+ TextInputFormat, the mapper ignores the key to the map function, and the
+ fields are taken from the value only. Otherwise, the fields are the union of
+ those from the key and those from the value.
+ 
+ The field separator is under attribute "mapred.data.field.separator".
+ 
+ The map output field list spec is under attribute "map.output.key.value.fields.spec".
+ The value is expected to be like "keyFieldsSpec:valueFieldsSpec".
+ key/valueFieldsSpec are comma (,) separated field specs: fieldSpec,fieldSpec,fieldSpec ...
+ Each field spec can be a simple number (e.g. 5) specifying a specific field, a range
+ (like 2-5) specifying a range of fields, or an open range (like 3-) specifying all
+ the fields starting from field 3. Open range field specs apply to value fields only;
+ they have no effect on the key fields.
+ 
+ Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields 4,3,0 and 1 for keys,
+ and to use fields 6,5,1,2,3,7 and above for values.
+ 
+ The reduce output field list spec is under attribute "reduce.output.key.value.fields.spec".
+ 
+ The reducer extracts output key/value pairs in a similar manner, except that
+ the key is never ignored.
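+ 
+ A configuration sketch (the map field spec shown is the illustrative one
+ above; the reduce spec value is hypothetical):
+ <pre>
+ JobConf conf = new JobConf();
+ conf.setMapperClass(FieldSelectionMapReduce.class);
+ conf.setReducerClass(FieldSelectionMapReduce.class);
+ conf.set("mapred.data.field.separator", "\t");
+ conf.set("map.output.key.value.fields.spec", "4,3,0,1:6,5,1-3,7-");
+ conf.set("reduce.output.key.value.fields.spec", "0-:0-");
+ </pre>]]>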
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
+ <!-- start class org.apache.hadoop.mapred.lib.HashPartitioner -->
+ <class name="HashPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use
+ {@link org.apache.hadoop.mapreduce.lib.partition.HashPartitioner} instead.">
+ <implements name="org.apache.hadoop.mapred.Partitioner"/>
+ <constructor name="HashPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partition keys by their {@link Object#hashCode()}.
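+ <p/>
+ The conventional computation (a sketch of the usual contract, not
+ necessarily this exact source):
+ <pre>
+ public int getPartition(Object key, Object value, int numReduceTasks) {
+   // mask the sign bit so the partition index is never negative
+   return (key.hashCode() &amp; Integer.MAX_VALUE) % numReduceTasks;
+ }
+ </pre>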
+ @deprecated Use
+ {@link org.apache.hadoop.mapreduce.lib.partition.HashPartitioner} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.HashPartitioner -->
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <class name="IdentityMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.Mapper} instead.">
+ <implements name="org.apache.hadoop.mapred.Mapper"/>
+ <constructor name="IdentityMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The identity function. Input key/value pair is written directly to
+ output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Implements the identity function, mapping inputs directly to outputs.
+ @deprecated Use {@link org.apache.hadoop.mapreduce.Mapper} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <class name="IdentityReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.Reducer} instead.">
+ <implements name="org.apache.hadoop.mapred.Reducer"/>
+ <constructor name="IdentityReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="values" type="java.util.Iterator"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Writes all keys and values directly to output.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Performs no reduction, writing all input values directly to the output.
+ @deprecated Use {@link org.apache.hadoop.mapreduce.Reducer} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.IdentityReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler -->
+ <class name="InputSampler" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="InputSampler" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="writePartitionFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="sampler" type="org.apache.hadoop.mapred.lib.InputSampler.Sampler"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write a partition file for the given job, using the Sampler provided.
+ Queries the sampler for a sample keyset, sorts by the output key
+ comparator, selects the keys for each rank, and writes to the destination
+ returned from {@link
+ org.apache.hadoop.mapred.lib.TotalOrderPartitioner#getPartitionFile}.]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Driver for InputSampler from the command line.
+ Configures a JobConf instance and calls {@link #writePartitionFile}.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Utility for collecting samples and writing a partition file for
+ {@link org.apache.hadoop.mapred.lib.TotalOrderPartitioner}.
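+ <p/>
+ A usage sketch (the sampler parameters are illustrative):
+ <pre>
+ InputSampler.Sampler sampler =
+   new InputSampler.RandomSampler(0.1, 10000, 10);
+ job.setPartitionerClass(TotalOrderPartitioner.class);
+ InputSampler.writePartitionFile(job, sampler);
+ </pre>]]>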
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler -->
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler -->
+ <class name="InputSampler.IntervalSampler" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.InputSampler.Sampler"/>
+ <constructor name="InputSampler.IntervalSampler" type="double"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new IntervalSampler sampling <em>all</em> splits.
+ @param freq The frequency with which records will be emitted.]]>
+ </doc>
+ </constructor>
+ <constructor name="InputSampler.IntervalSampler" type="double, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new IntervalSampler.
+ @param freq The frequency with which records will be emitted.
+ @param maxSplitsSampled The maximum number of splits to examine.
+ @see #getSample]]>
+ </doc>
+ </constructor>
+ <method name="getSample" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For each split sampled, emit when the ratio of the number of records
+ retained to the total record count is less than the specified
+ frequency.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sample from s splits at regular intervals.
+ Useful for sorted data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler -->
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler.RandomSampler -->
+ <class name="InputSampler.RandomSampler" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.InputSampler.Sampler"/>
+ <constructor name="InputSampler.RandomSampler" type="double, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new RandomSampler sampling <em>all</em> splits.
+ This will read every split at the client, which is very expensive.
+ @param freq Probability with which a key will be chosen.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.]]>
+ </doc>
+ </constructor>
+ <constructor name="InputSampler.RandomSampler" type="double, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new RandomSampler.
+ @param freq Probability with which a key will be chosen.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.
+ @param maxSplitsSampled The maximum number of splits to examine.]]>
+ </doc>
+ </constructor>
+ <method name="getSample" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Randomize the split order, then take the specified number of keys from
+ each split sampled, where each key is selected with the specified
+ probability and possibly replaced by a subsequently selected key when
+ the quota of keys from that split is satisfied.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Sample from random points in the input.
+ General-purpose sampler. Takes numSamples / maxSplitsSampled inputs from
+ each split.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler.RandomSampler -->
+ <!-- start interface org.apache.hadoop.mapred.lib.InputSampler.Sampler -->
+ <interface name="InputSampler.Sampler" abstract="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="getSample" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For a given job, collect and return a subset of the keys from the
+ input data.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Interface to sample using an {@link org.apache.hadoop.mapred.InputFormat}.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.InputSampler.Sampler -->
+ <!-- start class org.apache.hadoop.mapred.lib.InputSampler.SplitSampler -->
+ <class name="InputSampler.SplitSampler" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.InputSampler.Sampler"/>
+ <constructor name="InputSampler.SplitSampler" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a SplitSampler sampling <em>all</em> splits.
+ Takes the first numSamples / numSplits records from each split.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.]]>
+ </doc>
+ </constructor>
+ <constructor name="InputSampler.SplitSampler" type="int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create a new SplitSampler.
+ @param numSamples Total number of samples to obtain from all selected
+ splits.
+ @param maxSplitsSampled The maximum number of splits to examine.]]>
+ </doc>
+ </constructor>
+ <method name="getSample" return="java.lang.Object[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inf" type="org.apache.hadoop.mapred.InputFormat"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[From each split sampled, take the first numSamples / numSplits records.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Samples the first n records from s splits.
+ Inexpensive way to sample random data.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InputSampler.SplitSampler -->
+ <!-- start class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <class name="InverseMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.lib.map.InverseMapper}
+ instead.">
+ <implements name="org.apache.hadoop.mapred.Mapper"/>
+ <constructor name="InverseMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The inverse function. Input keys and values are swapped.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that swaps keys and values.
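+ <p/>
+ In effect equivalent to (a sketch):
+ <pre>
+ public void map(Object key, Object value, OutputCollector output,
+     Reporter reporter) throws IOException {
+   output.collect(value, key); // emit value as key, key as value
+ }
+ </pre>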
+ @deprecated Use {@link org.apache.hadoop.mapreduce.lib.map.InverseMapper}
+ instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.InverseMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedComparator -->
+ <class name="KeyFieldBasedComparator" extends="org.apache.hadoop.io.WritableComparator"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="KeyFieldBasedComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[This comparator implementation provides a subset of the features provided
+ by the Unix/GNU Sort. In particular, the supported features are:
+ -n, (Sort numerically)
+ -r, (Reverse the result of comparison)
+ -k pos1[,pos2], where pos is of the form f[.c][opts], where f is the number
+ of the field to use, and c is the number of the first character from the
+ beginning of the field. Fields and character positions are numbered starting
+ with 1; a character position of zero in pos2 indicates the field's last
+ character. If '.c' is omitted from pos1, it defaults to 1 (the beginning
+ of the field); if omitted from pos2, it defaults to 0 (the end of the
+ field). opts are ordering options (any of 'nr' as described above).
+ We assume that the fields in the key are separated by
+ map.output.key.field.separator.
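+ <p/>
+ A configuration sketch (the option name is the one conventionally read by
+ this comparator and should be treated as an assumption; "-k2,2nr" sorts on
+ the second field, numerically and in reverse):
+ <pre>
+ conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
+ conf.set("mapred.text.key.comparator.options", "-k2,2nr");
+ </pre>]]>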
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedComparator -->
+ <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+ <class name="KeyFieldBasedPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner"/>
+ <constructor name="KeyFieldBasedPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="numReduceTasks" type="int"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="b" type="byte[]"/>
+ <param name="start" type="int"/>
+ <param name="end" type="int"/>
+ <param name="currentHash" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[Defines a way to partition keys based on certain key fields (also see
+ {@link KeyFieldBasedComparator}).
+ The key specification supported is of the form -k pos1[,pos2], where
+ pos is of the form f[.c][opts], where f is the number
+ of the key field to use, and c is the number of the first character from
+ the beginning of the field. Fields and character positions are numbered
+ starting with 1; a character position of zero in pos2 indicates the
+ field's last character. If '.c' is omitted from pos1, it defaults to 1
+ (the beginning of the field); if omitted from pos2, it defaults to 0
+ (the end of the field).
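+ <p/>
+ A configuration sketch (the option name is the one conventionally read by
+ this partitioner and should be treated as an assumption; "-k1,2" partitions
+ on the first two fields):
+ <pre>
+ conf.setPartitionerClass(KeyFieldBasedPartitioner.class);
+ conf.set("mapred.text.key.partitioner.options", "-k1,2");
+ </pre>]]>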
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+ <!-- start class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <class name="LongSumReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use {@link org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer}
+ instead.">
+ <implements name="org.apache.hadoop.mapred.Reducer"/>
+ <constructor name="LongSumReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="values" type="java.util.Iterator"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Reducer} that sums long values.
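+ <p/>
+ In effect equivalent to (a sketch; values are assumed to be LongWritable):
+ <pre>
+ long sum = 0;
+ while (values.hasNext()) {
+   sum += ((LongWritable) values.next()).get();
+ }
+ output.collect(key, new LongWritable(sum));
+ </pre>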
+ @deprecated Use {@link org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer}
+ instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.LongSumReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleInputs -->
+ <class name="MultipleInputs" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleInputs"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inputFormatClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a {@link Path} with a custom {@link InputFormat} to the list of
+ inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for the job
+ @param inputFormatClass {@link InputFormat} class to use for this path]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <param name="inputFormatClass" type="java.lang.Class"/>
+ <param name="mapperClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Add a {@link Path} with a custom {@link InputFormat} and
+ {@link Mapper} to the list of inputs for the map-reduce job.
+
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for the job
+ @param inputFormatClass {@link InputFormat} class to use for this path
+ @param mapperClass {@link Mapper} class to use for this path]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class supports MapReduce jobs that have multiple input paths with
+ a different {@link InputFormat} and {@link Mapper} for each path.
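+ <p/>
+ A usage sketch (the paths and mapper classes are hypothetical):
+ <pre>
+ MultipleInputs.addInputPath(conf, new Path("/data/a"),
+   TextInputFormat.class, AMapper.class);
+ MultipleInputs.addInputPath(conf, new Path("/data/b"),
+   SequenceFileInputFormat.class, BMapper.class);
+ </pre>]]>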
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleInputs -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+ <class name="MultipleOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a composite record writer that can write key/value data to different
+ output files.
+
+ @param fs
+ the file system to use
+ @param job
+ the job conf for the job
+ @param name
+ the leaf file name for the output file (such as "part-00000")
+ @param arg3
+ a progressable for reporting progress.
+ @return a composite record writer
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="generateLeafFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the leaf name for the output file name. The default behavior does
+ not change the leaf file name (such as "part-00000").
+
+ @param name
+ the leaf file name for the output file
+ @return the given leaf file name]]>
+ </doc>
+ </method>
+ <method name="generateFileNameForKeyValue" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the output file name based on the given key and the leaf file
+ name. The default behavior is that the file name does not depend on the
+ key.
+
+ @param key
+ the key of the output data
+ @param name
+ the leaf file name
+ @return generated file name]]>
+ </doc>
+ </method>
+ <method name="generateActualKey" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate the actual key from the given key/value. The default behavior is that
+ the actual key is equal to the given key.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual key derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="generateActualValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate the actual value from the given key and value. The default behavior is that
+ the actual value is equal to the given value.
+
+ @param key
+ the key of the output data
+ @param value
+ the value of the output data
+ @return the actual value derived from the given key/value]]>
+ </doc>
+ </method>
+ <method name="getInputFileBasedOutputFileName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate the output file name based on a given name and the input file name.
+ If the map input file does not exist (i.e. this is not a map-only job),
+ the given name is returned unchanged. If the config value for
+ "num.of.trailing.legs.to.use" is not set, or is set to 0 or a negative value,
+ the given name is returned unchanged. Otherwise, return a file name consisting
+ of the N trailing legs of the input file name, where N is the config value for
+ "num.of.trailing.legs.to.use".
+
+ @param job
+ the job config
+ @param name
+ the output file name
+ @return the output file name based on the given name and the input file name.]]>
+ </doc>
+ </method>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param fs
+ the file system to use
+ @param job
+ a job conf object
+ @param name
+ the name of the file over which a record writer object will be
+ constructed
+ @param arg3
+ a progressable object
+ @return A RecordWriter object over the given file
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This abstract class extends the FileOutputFormat, allowing the output data
+ to be written to different output files. There are three basic use cases for
+ this class.
+
+ Case one: This class is used for a map reduce job with at least one reducer.
+ The reducer wants to write data to different files depending on the actual
+ keys. It is assumed that a key (or value) encodes the actual key (value)
+ and the desired location for the actual key (value).
+
+ Case two: This class is used for a map only job. The job wants to use an
+ output file name that is either a part of the input file name of the input
+ data, or some derivation of it.
+
+ Case three: This class is used for a map only job. The job wants to use an
+ output file name that depends on both the keys and the input file name.
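+
+ As a sketch of case one (hedged: KeyBasedTextOutput is a hypothetical
+ subclass, not part of the library), a concrete subclass only needs to
+ derive the output file name from each record:
+ <pre>
+ public static class KeyBasedTextOutput
+     extends MultipleTextOutputFormat&lt;Text, Text&gt; {
+   protected String generateFileNameForKeyValue(Text key, Text value,
+                                                String name) {
+     // route each record to a file named after its key, keeping the
+     // default leaf name (e.g. part-00000) as a suffix
+     return key.toString() + "/" + name;
+   }
+ }
+ </pre>]]>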
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputs -->
+ <class name="MultipleOutputs" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleOutputs" type="org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Creates and initializes multiple named outputs support; it should be
+ instantiated in the Mapper/Reducer <code>configure</code> method.
+
+ @param job the job configuration object]]>
+ </doc>
+ </constructor>
+ <method name="getNamedOutputsList" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Returns the list of channel (named output) names.
+
+ @param conf job conf
+ @return list of channel names]]>
+ </doc>
+ </method>
+ <method name="isMultiNamedOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns whether a named output is a multi named output.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return <code>true</code> if the named output is multi, <code>false</code>
+ if it is single. If the named output is not defined it returns
+ <code>false</code>]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputFormatClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the named output OutputFormat.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return namedOutput OutputFormat]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the key class for a named output.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return class for the named output key]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the value class for a named output.
+
+ @param conf job conf
+ @param namedOutput named output
+ @return class of named output value]]>
+ </doc>
+ </method>
+ <method name="addNamedOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="outputFormatClass" type="java.lang.Class"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valueClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Adds a named output for the job.
+ <p/>
+
+ @param conf job conf to add the named output
+ @param namedOutput named output name; it has to be a word, with letters
+ and numbers only, and it cannot be the word 'part', as
+ that is reserved for the
+ default output.
+ @param outputFormatClass OutputFormat class.
+ @param keyClass key class
+ @param valueClass value class]]>
+ </doc>
+ </method>
+ <method name="addMultiNamedOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="outputFormatClass" type="java.lang.Class"/>
+ <param name="keyClass" type="java.lang.Class"/>
+ <param name="valueClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Adds a multi named output for the job.
+ <p/>
+
+ @param conf job conf to add the named output
+ @param namedOutput named output name; it has to be a word, with letters
+ and numbers only, and it cannot be the word 'part', as
+ that is reserved for the
+ default output.
+ @param outputFormatClass OutputFormat class.
+ @param keyClass key class
+ @param valueClass value class]]>
+ </doc>
+ </method>
+ <method name="setCountersEnabled"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="enabled" type="boolean"/>
+ <doc>
+ <![CDATA[Enables or disables counters for the named outputs.
+ <p/>
+ By default these counters are disabled.
+ <p/>
+ The counters group is the {@link MultipleOutputs} class name.
+ </p>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, an underscore '_', and the multi name.
+
+ @param conf job conf in which to enable or disable the counters.
+ @param enabled indicates if the counters will be enabled or not.]]>
+ </doc>
+ </method>
+ <method name="getCountersEnabled" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Returns whether the counters for the named outputs are enabled or not.
+ <p/>
+ By default these counters are disabled.
+ <p/>
+ The counters group is the {@link MultipleOutputs} class name.
+ </p>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, an underscore '_', and the multi name.
+
+ @param conf job conf from which the setting is read.
+ @return TRUE if the counters are enabled, FALSE if they are disabled.]]>
+ </doc>
+ </method>
+ <method name="getNamedOutputs" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns an iterator over the defined named outputs.
+
+ @return iterator over the defined named outputs]]>
+ </doc>
+ </method>
+ <method name="getCollector" return="org.apache.hadoop.mapred.OutputCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the output collector for a named output.
+ <p/>
+
+ @param namedOutput the named output name
+ @param reporter the reporter
+ @return the output collector for the given named output
+ @throws IOException thrown if output collector could not be created]]>
+ </doc>
+ </method>
+ <method name="getCollector" return="org.apache.hadoop.mapred.OutputCollector"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="namedOutput" type="java.lang.String"/>
+ <param name="multiName" type="java.lang.String"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the output collector for a multi named output.
+ <p/>
+
+ @param namedOutput the named output name
+ @param multiName the multi name part
+ @param reporter the reporter
+ @return the output collector for the given named output
+ @throws IOException thrown if output collector could not be created]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Closes all the opened named outputs.
+ <p/>
+ If overridden, subclasses must invoke <code>super.close()</code> at the
+ end of their <code>close()</code> implementation.
+
+ @throws java.io.IOException thrown if any of the MultipleOutput files
+ could not be closed properly.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The MultipleOutputs class simplifies writing to additional outputs other
+ than the job default output via the <code>OutputCollector</code> passed to
+ the <code>map()</code> and <code>reduce()</code> methods of the
+ <code>Mapper</code> and <code>Reducer</code> implementations.
+ <p/>
+ Each additional output, or named output, may be configured with its own
+ <code>OutputFormat</code>, with its own key class and with its own value
+ class.
+ <p/>
+ A named output can be a single file or a multi file. The latter is referred
+ to as a multi named output.
+ <p/>
+ A multi named output is an unbounded set of files all sharing the same
+ <code>OutputFormat</code>, key class and value class configuration.
+ <p/>
+ When named outputs are used within a <code>Mapper</code> implementation,
+ key/values written to a named output are not part of the reduce phase; only
+ key/values written to the job <code>OutputCollector</code> are part of the
+ reduce phase.
+ <p/>
+ MultipleOutputs supports counters; by default they are disabled. The counters
+ group is the {@link MultipleOutputs} class name.
+ </p>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, an underscore '_', and the multi name.
+ <p/>
+ Job configuration usage pattern is:
+ <pre>
+
+ JobConf conf = new JobConf();
+
+ FileInputFormat.setInputPaths(conf, inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+
+ conf.setMapperClass(MOMap.class);
+ conf.setReducerClass(MOReduce.class);
+ ...
+
+ // Defines additional single text based output 'text' for the job
+ MultipleOutputs.addNamedOutput(conf, "text", TextOutputFormat.class,
+ LongWritable.class, Text.class);
+
+ // Defines additional multi sequencefile based output 'sequence' for the
+ // job
+ MultipleOutputs.addMultiNamedOutput(conf, "seq",
+ SequenceFileOutputFormat.class,
+ LongWritable.class, Text.class);
+ ...
+
+ JobClient jc = new JobClient();
+ RunningJob job = jc.submitJob(conf);
+
+ ...
+ </pre>
+ <p/>
+ Usage pattern in the Reducer is:
+ <pre>
+
+ public class MOReduce implements
+ Reducer&lt;WritableComparable, Writable,
+ WritableComparable, Writable&gt; {
+ private MultipleOutputs mos;
+
+ public void configure(JobConf conf) {
+ ...
+ mos = new MultipleOutputs(conf);
+ }
+
+ public void reduce(WritableComparable key, Iterator&lt;Writable&gt; values,
+ OutputCollector output, Reporter reporter)
+ throws IOException {
+ ...
+ mos.getCollector("text", reporter).collect(key, new Text("Hello"));
+ mos.getCollector("seq", "A", reporter).collect(key, new Text("Bye"));
+ mos.getCollector("seq", "B", reporter).collect(key, new Text("Chau"));
+ ...
+ }
+
+ public void close() throws IOException {
+ mos.close();
+ ...
+ }
+
+ }
+ </pre>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputs -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <class name="MultipleSequenceFileOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleSequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends the MultipleOutputFormat, allowing the output data to be
+ written to different output files in SequenceFile output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <class name="MultipleTextOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultipleTextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[This class extends the MultipleOutputFormat, allowing the output data to be
+ written to different output files in Text output format.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
+ <class name="MultithreadedMapRunner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.MapRunnable"/>
+ <constructor name="MultithreadedMapRunner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="org.apache.hadoop.mapred.RecordReader"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Multithreaded implementation for {@link org.apache.hadoop.mapred.MapRunnable}.
+ <p>
+ It can be used instead of the default implementation,
+ {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not CPU
+ bound, in order to improve throughput.
+ <p>
+ Map implementations using this MapRunnable must be thread-safe.
+ <p>
+ The Map-Reduce job has to be configured to use this MapRunnable class (using
+ the JobConf.setMapRunnerClass method) and
+ the number of threads the thread-pool can use with the
+ <code>mapred.map.multithreadedrunner.threads</code> property; its default
+ value is 10 threads.
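+ <p>
+ A brief configuration sketch (hedged: MyThreadSafeMapper is a
+ hypothetical, thread-safe mapper class):
+ <pre>
+ JobConf conf = new JobConf();
+ conf.setMapperClass(MyThreadSafeMapper.class);
+ conf.setMapRunnerClass(MultithreadedMapRunner.class);
+ conf.setInt("mapred.map.multithreadedrunner.threads", 20);
+ </pre>]]>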
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
+ <!-- start class org.apache.hadoop.mapred.lib.NLineInputFormat -->
+ <class name="NLineInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="NLineInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="numSplits" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Logically splits the set of input files for the job; N lines
+ of the input make up one split.
+
+ @see org.apache.hadoop.mapred.FileInputFormat#getSplits(JobConf, int)]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[NLineInputFormat splits N lines of input into one split.
+
+ In many "pleasantly" parallel applications, each process/mapper
+ processes the same input file(s), but the computations are
+ controlled by different parameters. (This is referred to as a
+ "parameter sweep".) One way to achieve this is to specify a set
+ of parameters (one set per line) as input in a control file
+ (which is the input path to the map-reduce application,
+ whereas the actual input dataset is specified
+ via a config variable in JobConf).
+
+ The NLineInputFormat can be used in such applications: it splits
+ the input file such that by default one line is fed as
+ a value to one map task, and the key is the offset of the line,
+ i.e. (k,v) is (LongWritable, Text).
+ The location hints will span the whole mapred cluster.
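+
+ A short job-setup sketch (hedged: the property name below is this
+ version's convention and should be verified against the shipped source):
+ <pre>
+ JobConf conf = new JobConf();
+ conf.setInputFormat(NLineInputFormat.class);
+ // feed 10 parameter lines to each map task instead of the default 1
+ conf.setInt("mapred.line.input.format.linespermap", 10);
+ </pre>]]>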
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NLineInputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <class name="NullOutputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use
+ {@link org.apache.hadoop.mapreduce.lib.output.NullOutputFormat} instead.">
+ <implements name="org.apache.hadoop.mapred.OutputFormat"/>
+ <constructor name="NullOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <doc>
+ <![CDATA[Consume all outputs and put them in /dev/null.
+ @deprecated Use
+ {@link org.apache.hadoop.mapreduce.lib.output.NullOutputFormat} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+ <!-- start class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <class name="RegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper"/>
+ <constructor name="RegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.
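+
+ A hedged setup sketch (the two property names are assumptions based on how
+ the bundled grep example drives this mapper; verify them before use):
+ <pre>
+ JobConf conf = new JobConf();
+ conf.setMapperClass(RegexMapper.class);
+ conf.set("mapred.mapper.regex", "ERROR.*");   // pattern to match
+ conf.setInt("mapred.mapper.regex.group", 0);  // capture group to emit
+ </pre>]]>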
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.RegexMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+ <class name="TokenCountMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="Use
+ {@link org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper} instead.">
+ <implements name="org.apache.hadoop.mapred.Mapper"/>
+ <constructor name="TokenCountMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that maps text values into <token,freq> pairs. Uses
+ {@link StringTokenizer} to break text into tokens.
+ @deprecated Use
+ {@link org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper} instead.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.TotalOrderPartitioner -->
+ <class name="TotalOrderPartitioner" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Partitioner"/>
+ <constructor name="TotalOrderPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Read in the partition file and build indexing data structures.
+ If the keytype is {@link org.apache.hadoop.io.BinaryComparable} and
+ <tt>total.order.partitioner.natural.order</tt> is not false, a trie
+ of the first <tt>total.order.partitioner.max.trie.depth</tt> + 1 bytes
+ (default depth 2) will be built. Otherwise, keys will be located using a
+ binary search of the partition keyset using the
+ {@link org.apache.hadoop.io.RawComparator} defined for this job. The input
+ file must be sorted with the same comparator and contain {@link
+ org.apache.hadoop.mapred.JobConf#getNumReduceTasks} - 1 keys.
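+
+ A minimal wiring sketch (hedged: "/tmp/partitions" is a placeholder, and
+ the partition SequenceFile must already exist and be sorted as described
+ above):
+ <pre>
+ JobConf conf = new JobConf();
+ conf.setPartitionerClass(TotalOrderPartitioner.class);
+ TotalOrderPartitioner.setPartitionFile(conf, new Path("/tmp/partitions"));
+ </pre>]]>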
+ </doc>
+ </method>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="numPartitions" type="int"/>
+ </method>
+ <method name="setPartitionFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="p" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the path to the SequenceFile storing the sorted partition keyset.
+ It must be the case that for <tt>R</tt> reduces, there are <tt>R-1</tt>
+ keys in the SequenceFile.]]>
+ </doc>
+ </method>
+ <method name="getPartitionFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the path to the SequenceFile storing the sorted partition keyset.
+ @see #setPartitionFile(JobConf,Path)]]>
+ </doc>
+ </method>
+ <field name="DEFAULT_PATH" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[Partitioner effecting a total order by reading split points from
+ an externally generated source.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.TotalOrderPartitioner -->
+</package>
+<package name="org.apache.hadoop.mapred.lib.aggregate">
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+ <class name="DoubleValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="DoubleValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a double value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="double"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a double value.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getSum" return="double"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up a sequence of double
+ values.
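+
+ A tiny usage sketch (values may be supplied as doubles, or as objects
+ whose string form parses to a double):
+ <pre>
+ DoubleValueSum sum = new DoubleValueSum();
+ sum.addNextValue(1.5);
+ sum.addNextValue("2.5");         // parsed from its string representation
+ String report = sum.getReport(); // "4.0"
+ sum.reset();
+ </pre>]]>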
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <class name="LongValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the maximum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <class name="LongValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="newVal" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param newVal
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the minimum of
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <class name="LongValueSum" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="LongValueSum"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object whose string representation represents a long value.]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="long"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a long value.]]>
+ </doc>
+ </method>
+ <method name="getSum" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that sums up
+ a sequence of long values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <class name="StringValueMax" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMax"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the largest of
+ a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <class name="StringValueMin" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="StringValueMin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ a string.]]>
+ </doc>
+ </method>
+ <method name="getVal" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the aggregated value]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregated value]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of one element. The element is a string
+ representation of the aggregated value. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that maintains the smallest of
+ a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+ <class name="UniqValueCount" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="UniqValueCount"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[the default constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="UniqValueCount" type="long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructor.
+ @param maxNum the limit on the number of unique values to keep.]]>
+ </doc>
+ </constructor>
+ <method name="setMaxItems" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="n" type="long"/>
+ <doc>
+ <![CDATA[Set the limit on the number of unique values
+ @param n the desired limit on the number of unique values
+ @return the new limit on the number of unique values]]>
+ </doc>
+ </method>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val
+ an object.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the number of unique objects aggregated]]>
+ </doc>
+ </method>
+ <method name="getUniqueItems" return="java.util.Set"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the set of the unique objects]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of the unique objects. The return value is
+ expected to be used by a combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that counts the distinct values
+ in a sequence of objects.
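+
+ A tiny usage sketch:
+ <pre>
+ UniqValueCount uniq = new UniqValueCount();
+ uniq.addNextValue("a");
+ uniq.addNextValue("b");
+ uniq.addNextValue("a");
+ String count = uniq.getReport(); // "2" -- two distinct values
+ </pre>]]>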
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+ <class name="UserDefinedValueAggregatorDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="UserDefinedValueAggregatorDescriptor" type="java.lang.String, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@param className the class name of the user defined descriptor class
+ @param job a configuration object used for descriptor configuration]]>
+ </doc>
+ </constructor>
+ <method name="createInstance" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="className" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Create an instance of the given class
+ @param className the name of the class
+ @return a dynamically created instance of the given class]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pairs
+ by delegating the invocation to the real object.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate-based job.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this object.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a wrapper for a user defined value aggregator descriptor.
+ It serves two functions: one is to create an object of ValueAggregatorDescriptor from the
+ name of a user defined class that may be dynamically loaded. The other is to
+ delegate invocations of the generateKeyValPairs function to the created object.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+ <interface name="ValueAggregator" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[add a value to the aggregator
+
+ @param val the value to be added]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[reset the aggregator]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of the aggregator]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return an array of values as the outputs of the combiner.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This interface defines the minimal protocol for value aggregators.
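+
+ A minimal sketch of a custom implementation (a product aggregator,
+ purely illustrative and not part of the library):
+ <pre>
+ public class ValueProduct implements ValueAggregator {
+   private double product = 1.0;
+   public void addNextValue(Object val) {
+     product *= Double.parseDouble(val.toString());
+   }
+   public void reset() { product = 1.0; }
+   public String getReport() { return Double.toString(product); }
+   public ArrayList getCombinerOutput() {
+     ArrayList ret = new ArrayList(1);
+     ret.add(getReport());
+     return ret;
+   }
+ }
+ </pre>]]>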
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+ <class name="ValueAggregatorBaseDescriptor" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+ <constructor name="ValueAggregatorBaseDescriptor"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="generateEntry" return="java.util.Map.Entry"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <param name="id" type="java.lang.String"/>
+ <param name="val" type="org.apache.hadoop.io.Text"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @param id the aggregation id
+ @param val the val associated with the id to be aggregated
+ @return an Entry whose key is the aggregation id prefixed with
+ the aggregation type.]]>
+ </doc>
+ </method>
+ <method name="generateValueAggregator" return="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="type" type="java.lang.String"/>
+ <doc>
+ <![CDATA[@param type the aggregation type
+ @return a value aggregator of the given type.]]>
+ </doc>
+ </method>
+ <method name="generateKeyValPairs" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate 1 or 2 aggregation-id/value pairs for the given key/value pair.
+ The first id will be of type LONG_VALUE_SUM, with "record_count" as
+ its aggregation id. If the input is a file split,
+ the second id of the same type will be generated too, with the file name
+ as its aggregation id. This achieves the behavior of counting the total number
+ of records in the input data, and the number of records in each input file.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type which is used to guide the way to aggregate the
+ value in the reduce/combiner phase of an Aggregate-based job.
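+
+ Conceptually (hedged: the ':' separator between type and id follows this
+ version's descriptor conventions and should be verified), the emitted
+ pairs look like:
+ <pre>
+ // for every input record:
+ //   key = "LongValueSum:record_count",     value = "1"
+ // and, for file splits, additionally:
+ //   key = "LongValueSum:" + inputFileName, value = "1"
+ </pre>]]>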
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[get the input file name.
+
+ @param job a job configuration object]]>
+ </doc>
+ </method>
+ <field name="UNIQ_VALUE_COUNT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="DOUBLE_VALUE_SUM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="VALUE_HISTOGRAM" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="LONG_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MAX" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="STRING_VALUE_MIN" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="inputFile" type="java.lang.String"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This class implements the functionality common to
+ the subclasses of the ValueAggregatorDescriptor class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
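+ <!-- A minimal usage sketch (illustrative only, not part of the generated API
+      record). The class name WordCountDescriptor and the whitespace
+      tokenization are hypothetical, and the generic signatures follow the
+      0.20 sources rather than the erased forms recorded in this file.
+      Extending the base descriptor only requires overriding
+      generateKeyValPairs:
+ <pre>
+ import java.util.ArrayList;
+ import java.util.Map.Entry;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor;
+
+ public class WordCountDescriptor extends ValueAggregatorBaseDescriptor {
+   public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
+     ArrayList<Entry<Text, Text>> ret = new ArrayList<Entry<Text, Text>>();
+     // Emit one LONG_VALUE_SUM pair per token; the framework's combiner
+     // and reducer compute the per-token sums.
+     for (String word : val.toString().split("\\s+")) {
+       ret.add(generateEntry(LONG_VALUE_SUM, word, ONE));
+     }
+     return ret;
+   }
+ }
+ </pre> -->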
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <class name="ValueAggregatorCombiner" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorCombiner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Combiner does not need to configure.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Combines values for a given key.
+ @param key the key, expected to be a Text object whose prefix indicates
+ the type of aggregation used to aggregate the values.
+ @param values the values to combine
+ @param output to collect combined values]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing.]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic combiner of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+ <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+ <interface name="ValueAggregatorDescriptor" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="generateKeyValPairs" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pair.
+ This function is usually called by the mapper of an Aggregate based job.
+
+ @param key
+ input key
+ @param val
+ input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+ aggregation type, which is used to guide the way the value is aggregated
+ in the reduce/combiner phase of an Aggregate-based job.]]>
+ </doc>
+ </method>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Configure the object.
+
+ @param job
+ a JobConf object that may contain information that can be used
+ to configure the object.]]>
+ </doc>
+ </method>
+ <field name="TYPE_SEPARATOR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="ONE" type="org.apache.hadoop.io.Text"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This interface defines the contract a value aggregator descriptor must
+ support. Such a descriptor can be configured with a JobConf object. Its main
+ function is to generate a list of aggregation-id/value pairs. An aggregation
+ id encodes an aggregation type, which is used to guide the way the value is
+ aggregated in the reduce/combiner phase of an Aggregate-based job. The mapper
+ in an Aggregate-based map/reduce job may create one or more
+ ValueAggregatorDescriptor objects at configuration time. For each input
+ key/value pair, the mapper will use those objects to create aggregation
+ id/value pairs.]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
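+ <!-- A usage sketch of the configure/generateKeyValPairs contract
+      (illustrative only). The class name ThresholdDescriptor and the property
+      name "aggregate.min.token.length" are hypothetical:
+ <pre>
+ import java.util.ArrayList;
+ import java.util.Map.Entry;
+ import org.apache.hadoop.io.Text;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor;
+
+ public class ThresholdDescriptor extends ValueAggregatorBaseDescriptor {
+   private int minLength;
+
+   public void configure(JobConf job) {
+     super.configure(job);  // lets the base class pick up the input file name
+     minLength = job.getInt("aggregate.min.token.length", 1);
+   }
+
+   public ArrayList<Entry<Text, Text>> generateKeyValPairs(Object key, Object val) {
+     ArrayList<Entry<Text, Text>> ret = new ArrayList<Entry<Text, Text>>();
+     for (String tok : val.toString().split("\\s+")) {
+       if (tok.length() >= minLength) {
+         ret.add(generateEntry(LONG_VALUE_SUM, tok, ONE));
+       }
+     }
+     return ret;
+   }
+ }
+ </pre> -->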
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+ <class name="ValueAggregatorJob" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorJob"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation. Generic Hadoop
+ arguments are accepted.
+ @return a JobConf object ready for submission.
+
+ @throws IOException
+ @see GenericOptionsParser]]>
+ </doc>
+ </method>
+ <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setAggregatorDescriptors"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="descriptors" type="java.lang.Class[]"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create and run an Aggregate-based map/reduce job.
+
+ @param args the arguments used for job creation
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This is the main class for creating a map/reduce job using the Aggregate
+ framework. Aggregate is a specialization of the map/reduce framework for
+ performing various simple aggregations.
+
+ Generally speaking, in order to implement an application using the Map/Reduce
+ model, the developer implements Map and Reduce functions (and possibly
+ a combine function). However, a lot of applications related to counting and
+ statistics computing have very similar characteristics. Aggregate abstracts
+ out the general patterns of these functions and implements those patterns.
+ In particular, the package provides generic mapper/reducer/combiner classes,
+ a set of built-in value aggregators, and a generic utility class that
+ helps users create map/reduce jobs using these generic classes. The built-in
+ aggregators include:
+
+ sum over numeric values; count of distinct values; histogram of values;
+ minimum, maximum, median, average, and standard deviation of numeric values
+
+ The developer using Aggregate will need only to provide a plugin class
+ conforming to the following interface:
+
+ public interface ValueAggregatorDescriptor {
+   public ArrayList<Entry> generateKeyValPairs(Object key, Object value);
+   public void configure(JobConf job);
+ }
+
+ The package also provides a base class, ValueAggregatorBaseDescriptor,
+ implementing the above interface. The user can extend the base class and
+ implement generateKeyValPairs accordingly.
+
+ The primary work of generateKeyValPairs is to emit one or more key/value
+ pairs based on the input key/value pair. The key in an output key/value pair
+ encodes two pieces of information: aggregation type and aggregation id. The
+ value will be aggregated onto the aggregation id according to the aggregation
+ type.
+
+ This class offers a function to generate a map/reduce job using the Aggregate
+ framework. The function takes the following parameters: input directory spec,
+ input format (text or sequence file), output directory, and a file specifying
+ the user plugin class.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
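+ <!-- A driver sketch (illustrative only). AggregateDriver is hypothetical and
+      WordCountDescriptor refers to the descriptor sketch above; args follows
+      the parameter list described in the class documentation:
+ <pre>
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.JobClient;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob;
+
+ public class AggregateDriver {
+   public static void main(String[] args) throws IOException {
+     // Wires up the generic Aggregate mapper, combiner and reducer.
+     JobConf job = ValueAggregatorJob.createValueAggregatorJob(args);
+     // Descriptors may also be set programmatically instead of through
+     // the plugin spec file.
+     ValueAggregatorJob.setAggregatorDescriptors(
+         job, new Class[] { WordCountDescriptor.class });
+     JobClient.runJob(job);
+   }
+ }
+ </pre> -->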
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <class name="ValueAggregatorJobBase" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper"/>
+ <implements name="org.apache.hadoop.mapred.Reducer"/>
+ <constructor name="ValueAggregatorJobBase"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="logSpec"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="aggregatorDescriptorList" type="java.util.ArrayList"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[This abstract class implements some functionality common to
+ the generic mapper, reducer and combiner classes of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <class name="ValueAggregatorMapper" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Writable"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[The map function. It iterates through the value aggregator descriptor
+ list to generate aggregation id/value pairs and emit them.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="org.apache.hadoop.io.Text"/>
+ <param name="arg1" type="java.util.Iterator"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic mapper of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <class name="ValueAggregatorReducer" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ValueAggregatorReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.Text"/>
+ <param name="values" type="java.util.Iterator"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@param key
+ the key, expected to be a Text object whose prefix indicates
+ the type of aggregation used to aggregate the values. In effect,
+ data-driven computing is achieved. It is assumed that each aggregator's
+ getReport method emits appropriate output for the aggregator. This
+ may be further customized.
+ @param values the values to be aggregated]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="arg1" type="org.apache.hadoop.io.Writable"/>
+ <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Do nothing. Should not be called.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements the generic reducer of Aggregate.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+ <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+ <class name="ValueHistogram" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+ <constructor name="ValueHistogram"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="addNextValue"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="val" type="java.lang.Object"/>
+ <doc>
+ <![CDATA[Add the given val to the aggregator.
+
+ @param val the value to be added. It is expected to be a string
+ in the form of xxxx\tnum, meaning xxxx has num occurrences.]]>
+ </doc>
+ </method>
+ <method name="getReport" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return the string representation of this aggregator.
+ It includes the following basic statistics of the histogram:
+ the number of unique values
+ the minimum value
+ the median value
+ the maximum value
+ the average value
+ the standard deviation]]>
+ </doc>
+ </method>
+ <method name="getReportDetails" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a string representation of the list of value/frequency pairs of
+ the histogram]]>
+ </doc>
+ </method>
+ <method name="getCombinerOutput" return="java.util.ArrayList"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a list of value/frequency pairs.
+ The return value is expected to be used by the reducer.]]>
+ </doc>
+ </method>
+ <method name="getReportItems" return="java.util.TreeMap"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return a TreeMap representation of the histogram]]>
+ </doc>
+ </method>
+ <method name="reset"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Reset the aggregator.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class implements a value aggregator that computes the
+ histogram of a sequence of strings.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
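+ <!-- A usage sketch (illustrative only; the fed values are made up). Values
+      follow the "xxxx\tnum" convention documented on addNextValue:
+ <pre>
+ import org.apache.hadoop.mapred.lib.aggregate.ValueHistogram;
+
+ public class HistogramDemo {
+   public static void main(String[] args) {
+     ValueHistogram hist = new ValueHistogram();
+     hist.addNextValue("cat\t3");  // "cat" observed 3 times
+     hist.addNextValue("dog\t1");  // "dog" observed once
+     System.out.println(hist.getReport());         // summary statistics
+     System.out.println(hist.getReportDetails());  // value/frequency pairs
+     hist.reset();                                 // ready for reuse
+   }
+ }
+ </pre> -->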
+</package>
+<package name="org.apache.hadoop.mapred.lib.db">
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBConfiguration -->
+ <class name="DBConfiguration" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="configureDB"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="driverClass" type="java.lang.String"/>
+ <param name="dbUrl" type="java.lang.String"/>
+ <param name="userName" type="java.lang.String"/>
+ <param name="passwd" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the DB access related fields in the JobConf.
+ @param job the job
+ @param driverClass JDBC Driver class name
+ @param dbUrl JDBC DB access URL.
+ @param userName DB access username
+ @param passwd DB access passwd]]>
+ </doc>
+ </method>
+ <method name="configureDB"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="driverClass" type="java.lang.String"/>
+ <param name="dbUrl" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Sets the DB access related fields in the JobConf.
+ @param job the job
+ @param driverClass JDBC Driver class name
+ @param dbUrl JDBC DB access URL.]]>
+ </doc>
+ </method>
+ <field name="DRIVER_CLASS_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The JDBC Driver class name]]>
+ </doc>
+ </field>
+ <field name="URL_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[JDBC Database access URL]]>
+ </doc>
+ </field>
+ <field name="USERNAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[User name to access the database]]>
+ </doc>
+ </field>
+ <field name="PASSWORD_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Password to access the database]]>
+ </doc>
+ </field>
+ <field name="INPUT_TABLE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Input table name]]>
+ </doc>
+ </field>
+ <field name="INPUT_FIELD_NAMES_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Field names in the Input table]]>
+ </doc>
+ </field>
+ <field name="INPUT_CONDITIONS_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[WHERE clause in the input SELECT statement]]>
+ </doc>
+ </field>
+ <field name="INPUT_ORDER_BY_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[ORDER BY clause in the input SELECT statement]]>
+ </doc>
+ </field>
+ <field name="INPUT_QUERY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Whole input query, excluding LIMIT...OFFSET]]>
+ </doc>
+ </field>
+ <field name="INPUT_COUNT_QUERY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Input query to get the count of records]]>
+ </doc>
+ </field>
+ <field name="INPUT_CLASS_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Class name implementing DBWritable which will hold input tuples]]>
+ </doc>
+ </field>
+ <field name="OUTPUT_TABLE_NAME_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Output table name]]>
+ </doc>
+ </field>
+ <field name="OUTPUT_FIELD_NAMES_PROPERTY" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Field names in the Output table]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[A container for configuration property names for jobs with DB input/output.
+ <br>
+ The job can be configured using the static methods in this class,
+ {@link DBInputFormat}, and {@link DBOutputFormat}.
+ <p>
+ Alternatively, the properties can be set in the configuration with proper
+ values.
+
+ @see DBConfiguration#configureDB(JobConf, String, String, String, String)
+ @see DBInputFormat#setInput(JobConf, Class, String, String)
+ @see DBInputFormat#setInput(JobConf, Class, String, String, String, String...)
+ @see DBOutputFormat#setOutput(JobConf, String, String...)]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBConfiguration -->
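+ <!-- A configuration sketch (illustrative only; the driver class name, URL
+      and credentials are hypothetical). The static helper stores the DB
+      settings in the JobConf:
+ <pre>
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.lib.db.DBConfiguration;
+
+ public class DBSetup {
+   public static void main(String[] args) {
+     JobConf job = new JobConf();
+     DBConfiguration.configureDB(job,
+         "com.mysql.jdbc.Driver",        // JDBC driver class name
+         "jdbc:mysql://localhost/mydb",  // JDBC access URL
+         "dbuser",                       // DB username
+         "dbpasswd");                    // DB password
+   }
+ }
+ </pre> -->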
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat -->
+ <class name="DBInputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputFormat"/>
+ <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+ <constructor name="DBInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="chunks" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getCountQuery" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the query for getting the total number of rows;
+ subclasses can override this for custom behaviour.]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputClass" type="java.lang.Class"/>
+ <param name="tableName" type="java.lang.String"/>
+ <param name="conditions" type="java.lang.String"/>
+ <param name="orderBy" type="java.lang.String"/>
+ <param name="fieldNames" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Initializes the map-part of the job with the appropriate input settings.
+
+ @param job The job
+ @param inputClass the class object implementing DBWritable, which is the
+ Java object holding tuple fields.
+ @param tableName The table to read data from
+ @param conditions The conditions used to select data, e.g. '(updated >
+ 20070101 AND length > 0)'
+ @param orderBy the field names in the ORDER BY clause.
+ @param fieldNames The field names in the table
+ @see #setInput(JobConf, Class, String, String)]]>
+ </doc>
+ </method>
+ <method name="setInput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="inputClass" type="java.lang.Class"/>
+ <param name="inputQuery" type="java.lang.String"/>
+ <param name="inputCountQuery" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Initializes the map-part of the job with the appropriate input settings.
+
+ @param job The job
+ @param inputClass the class object implementing DBWritable, which is the
+ Java object holding tuple fields.
+ @param inputQuery the input query to select fields. Example :
+ "SELECT f1, f2, f3 FROM Mytable ORDER BY f1"
+ @param inputCountQuery the input query that returns the number of records in
+ the table.
+ Example : "SELECT COUNT(f1) FROM Mytable"
+ @see #setInput(JobConf, Class, String, String, String, String...)]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputFormat that reads input data from an SQL table.
+ <p>
+ DBInputFormat emits LongWritables containing the record number as
+ key and DBWritables as value.
+
+ The SQL query and the input class can be specified using one of the two
+ setInput methods.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat -->
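+ <!-- An input-setup sketch (illustrative only). MyWritable is the
+      Writable/DBWritable example given under DBWritable below; the table,
+      conditions and field names are hypothetical:
+ <pre>
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.lib.db.DBConfiguration;
+ import org.apache.hadoop.mapred.lib.db.DBInputFormat;
+
+ public class DBInputSetup {
+   public static void main(String[] args) {
+     JobConf job = new JobConf();
+     DBConfiguration.configureDB(job, "com.mysql.jdbc.Driver",
+         "jdbc:mysql://localhost/mydb");
+     job.setInputFormat(DBInputFormat.class);
+     // Selects counter and timestamp from MyTable where counter > 0,
+     // ordered by timestamp; MyWritable holds each tuple.
+     DBInputFormat.setInput(job, MyWritable.class, "MyTable",
+         "counter > 0", "timestamp",
+         new String[] { "counter", "timestamp" });
+   }
+ }
+ </pre> -->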
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBInputSplit -->
+ <class name="DBInputFormat.DBInputSplit" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.InputSplit"/>
+ <constructor name="DBInputFormat.DBInputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Default Constructor]]>
+ </doc>
+ </constructor>
+ <constructor name="DBInputFormat.DBInputSplit" type="long, long"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Convenience Constructor
+ @param start the index of the first row to select
+ @param end the index of the last row to select]]>
+ </doc>
+ </constructor>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getStart" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The index of the first row to select]]>
+ </doc>
+ </method>
+ <method name="getEnd" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[@return The index of the last row to select]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[@return The total row count in this split]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="input" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="output" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An InputSplit that spans a set of rows.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBInputSplit -->
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBRecordReader -->
+ <class name="DBInputFormat.DBRecordReader" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordReader"/>
+ <constructor name="DBInputFormat.DBRecordReader" type="org.apache.hadoop.mapred.lib.db.DBInputFormat.DBInputSplit, java.lang.Class, org.apache.hadoop.mapred.JobConf"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ <doc>
+ <![CDATA[@param split The InputSplit to read data for
+ @throws SQLException]]>
+ </doc>
+ </constructor>
+ <method name="getSelectQuery" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the query for selecting the records;
+ subclasses can override this for custom behaviour.]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createKey" return="org.apache.hadoop.io.LongWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="createValue" return="org.apache.hadoop.mapred.lib.db.DBWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getPos" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="next" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.LongWritable"/>
+ <param name="value" type="org.apache.hadoop.mapred.lib.db.DBWritable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A RecordReader that reads records from an SQL table.
+ Emits LongWritables containing the record number as
+ key and DBWritables as value.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat.DBRecordReader -->
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat.NullDBWritable -->
+ <class name="DBInputFormat.NullDBWritable" extends="java.lang.Object"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.lib.db.DBWritable"/>
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="DBInputFormat.NullDBWritable"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="java.sql.ResultSet"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="arg0" type="java.sql.PreparedStatement"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ </method>
+ <doc>
+ <![CDATA[A class that does nothing, implementing DBWritable.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat.NullDBWritable -->
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBOutputFormat -->
+ <class name="DBOutputFormat" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.OutputFormat"/>
+ <constructor name="DBOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="constructQuery" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="table" type="java.lang.String"/>
+ <param name="fieldNames" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Constructs the query used as the prepared statement to insert data.
+
+ @param table
+ the table to insert into
+ @param fieldNames
+ the fields to insert into. If field names are unknown, supply an
+ array of nulls.]]>
+ </doc>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filesystem" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="filesystem" type="org.apache.hadoop.fs.FileSystem"/>
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="setOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="tableName" type="java.lang.String"/>
+ <param name="fieldNames" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[Initializes the reduce-part of the job with the appropriate output settings.
+
+ @param job
+ The job
+ @param tableName
+ The table to insert data into
+ @param fieldNames
+ The field names in the table. If unknown, supply the appropriate
+ number of nulls.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An OutputFormat that sends the reduce output to an SQL table.
+ <p>
+ {@link DBOutputFormat} accepts &lt;key,value&gt; pairs, where
+ key has a type extending DBWritable. The returned {@link RecordWriter}
+ writes <b>only the key</b> to the database with a batch SQL query.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBOutputFormat -->
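+ <!-- An output-setup sketch (illustrative only; the table and field names are
+      hypothetical). Only the reduce output key is written, so it must
+      implement DBWritable:
+ <pre>
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.lib.db.DBOutputFormat;
+
+ public class DBOutputSetup {
+   public static void main(String[] args) {
+     JobConf job = new JobConf();
+     job.setOutputFormat(DBOutputFormat.class);
+     // Configures an INSERT into MyTable(counter, timestamp); pass nulls
+     // for the field names if they are unknown.
+     DBOutputFormat.setOutput(job, "MyTable",
+         new String[] { "counter", "timestamp" });
+   }
+ }
+ </pre> -->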
+ <!-- start class org.apache.hadoop.mapred.lib.db.DBOutputFormat.DBRecordWriter -->
+ <class name="DBOutputFormat.DBRecordWriter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.RecordWriter"/>
+ <constructor name="DBOutputFormat.DBRecordWriter" type="java.sql.Connection, java.sql.PreparedStatement"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ </constructor>
+ <method name="close"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.mapred.lib.db.DBWritable"/>
+ <param name="value" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[{@inheritDoc}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A RecordWriter that writes the reduce output to an SQL table.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.lib.db.DBOutputFormat.DBRecordWriter -->
+ <!-- start interface org.apache.hadoop.mapred.lib.db.DBWritable -->
+ <interface name="DBWritable" abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="statement" type="java.sql.PreparedStatement"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ <doc>
+ <![CDATA[Sets the fields of the object in the {@link PreparedStatement}.
+ @param statement the statement that the fields are put into.
+ @throws SQLException]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="resultSet" type="java.sql.ResultSet"/>
+ <exception name="SQLException" type="java.sql.SQLException"/>
+ <doc>
+ <![CDATA[Reads the fields of the object from the {@link ResultSet}.
+ @param resultSet the {@link ResultSet} to get the fields from.
+ @throws SQLException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Objects that are read from/written to a database should implement
+ <code>DBWritable</code>. DBWritable is similar to {@link Writable}
+ except that the {@link #write(PreparedStatement)} method takes a
+ {@link PreparedStatement}, and {@link #readFields(ResultSet)}
+ takes a {@link ResultSet}.
+ <p>
+ Implementations are responsible for writing the fields of the object
+ to PreparedStatement, and reading the fields of the object from the
+ ResultSet.
+
+ <p>Example:</p>
+ If we have the following table in the database:
+ <pre>
+ CREATE TABLE MyTable (
+ counter INTEGER NOT NULL,
+ timestamp BIGINT NOT NULL
+ );
+ </pre>
+ then we can read/write the tuples from/to the table with:
+ <p><pre>
+ public class MyWritable implements Writable, DBWritable {
+ // Some data
+ private int counter;
+ private long timestamp;
+
+ //Writable#write() implementation
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(counter);
+ out.writeLong(timestamp);
+ }
+
+ //Writable#readFields() implementation
+ public void readFields(DataInput in) throws IOException {
+ counter = in.readInt();
+ timestamp = in.readLong();
+ }
+
+ public void write(PreparedStatement statement) throws SQLException {
+ statement.setInt(1, counter);
+ statement.setLong(2, timestamp);
+ }
+
+ public void readFields(ResultSet resultSet) throws SQLException {
+ counter = resultSet.getInt(1);
+ timestamp = resultSet.getLong(2);
+ }
+ }
+ </pre></p>]]>
+ </doc>
+ </interface>
+ <!-- end interface org.apache.hadoop.mapred.lib.db.DBWritable -->
+</package>
+<package name="org.apache.hadoop.mapred.pipes">
+ <!-- start class org.apache.hadoop.mapred.pipes.Submitter -->
+ <class name="Submitter" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="Submitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Submitter" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getExecutable" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Get the URI of the application's executable.
+ @param conf the configuration to check
+ @return the URI where the application's executable is located]]>
+ </doc>
+ </method>
+ <method name="setExecutable"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="executable" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Set the URI for the application's executable. Normally this is an hdfs:
+ location.
+ @param conf the configuration to modify
+ @param executable The URI of the application's executable.]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job is using a Java RecordReader.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordReader" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java RecordReader
+ @param conf the configuration to check
+ @return is it a Java RecordReader?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaMapper"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Mapper is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaMapper" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Mapper.
+ @param conf the configuration to check
+ @return is it a Java Mapper?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaReducer"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the Reducer is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+ </doc>
+ </method>
+ <method name="getIsJavaReducer" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Check whether the job is using a Java Reducer.
+ @param conf the configuration to check
+ @return is it a Java Reducer?]]>
+ </doc>
+ </method>
+ <method name="setIsJavaRecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="value" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the job will use a Java RecordWriter.
+ @param conf the configuration to modify
+ @param value the new value to set]]>
+ </doc>
+ </method>
+ <method name="getIsJavaRecordWriter" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Will the reduce use a Java RecordWriter?
+ @param conf the configuration to check
+ @return true if the output of the job will be written by Java]]>
+ </doc>
+ </method>
+ <method name="getKeepCommandFile" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <doc>
+ <![CDATA[Does the user want to keep the command file for debugging? If this is
+ true, pipes will write a copy of the command data to a file in the
+ task directory named "downlink.data", which may be used to run the C++
+ program under the debugger. You probably also want to set
+ JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
+ being deleted.
+ To run using the data file, set the environment variable
+ "hadoop.pipes.command.file" to point to the file.
+ @param conf the configuration to check
+ @return will the framework save the command file?]]>
+ </doc>
+ </method>
+ <method name="setKeepCommandFile"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <param name="keep" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether to keep the command file for debugging.
+ @param conf the configuration to modify
+ @param keep the new value]]>
+ </doc>
+ </method>
+ <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="Use {@link Submitter#runJob(JobConf)}">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException
+ @deprecated Use {@link Submitter#runJob(JobConf)}]]>
+ </doc>
+ </method>
+ <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="jobSubmit" return="org.apache.hadoop.mapred.RunningJob"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Submit a job to the Map-Reduce framework.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+
+ @param conf the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+ running-job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[Submit a pipes job based on the command line arguments.
+ @param args]]>
+ </doc>
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The main entry point and job submitter. It may be used either as a
+ command-line tool or as an API to launch Pipes jobs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.pipes.Submitter -->
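+ <!-- A submission sketch (illustrative only; the executable URI is
+      hypothetical). The C++ binary is shipped by its HDFS URI while the
+      record reading and writing stay on the Java side:
+ <pre>
+ import java.io.IOException;
+ import org.apache.hadoop.mapred.JobConf;
+ import org.apache.hadoop.mapred.RunningJob;
+ import org.apache.hadoop.mapred.pipes.Submitter;
+
+ public class PipesDriver {
+   public static void main(String[] args) throws IOException {
+     JobConf conf = new JobConf();
+     Submitter.setExecutable(conf, "hdfs://namenode/apps/wordcount-pipes");
+     Submitter.setIsJavaRecordReader(conf, true);
+     Submitter.setIsJavaRecordWriter(conf, true);
+     RunningJob job = Submitter.runJob(conf);  // submit the pipes job
+   }
+ }
+ </pre> -->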
+</package>
+<package name="org.apache.hadoop.mapred.tools">
+ <!-- start class org.apache.hadoop.mapred.tools.MRAdmin -->
+ <class name="MRAdmin" extends="org.apache.hadoop.conf.Configured"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="MRAdmin"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="MRAdmin" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[Administrative access to Hadoop Map-Reduce.
+
+ Currently it only provides the ability to connect to the {@link JobTracker}
+ and refresh the service-level authorization policy.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapred.tools.MRAdmin -->
+</package>
+<package name="org.apache.hadoop.mapreduce">
+ <!-- start class org.apache.hadoop.mapreduce.Counter -->
+ <class name="Counter" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="Counter"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="Counter" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setDisplayName"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="displayName" type="java.lang.String"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the binary representation of the counter]]>
+ </doc>
+ </method>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the display name of the counter.
+ @return the user-facing name of the counter]]>
+ </doc>
+ </method>
+ <method name="getValue" return="long"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[What is the current value of this counter?
+ @return the current value]]>
+ </doc>
+ </method>
+ <method name="increment"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="incr" type="long"/>
+ <doc>
+ <![CDATA[Increment this counter by the given value
+ @param incr the value to increase this counter by]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericRight" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A named counter that tracks the progress of a map/reduce job.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> is named by
+ an {@link Enum} and has a long for the value.</p>
+
+ <p><code>Counters</code> are bunched into groups, each comprising
+ counters from a particular <code>Enum</code> class.</p>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.Counter -->
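+ <!-- A sketch of the enum-based counter pattern described above: each Counter
+ is named by an Enum and incremented from a running task. Assumes the task
+ context exposes getCounter(Enum), as in the new mapreduce API.
+
+   import java.io.IOException;
+   import org.apache.hadoop.io.IntWritable;
+   import org.apache.hadoop.io.Text;
+   import org.apache.hadoop.mapreduce.Mapper;
+
+   public class CountingMapper extends Mapper<Object, Text, Text, IntWritable> {
+     // User-defined enum; each constant becomes a named counter.
+     public static enum Records { SEEN, EMPTY }
+
+     public void map(Object key, Text value, Context context)
+         throws IOException, InterruptedException {
+       context.getCounter(Records.SEEN).increment(1);
+       if (value.getLength() == 0) {
+         context.getCounter(Records.EMPTY).increment(1);
+       }
+     }
+   }
+ -->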
+ <!-- start class org.apache.hadoop.mapreduce.CounterGroup -->
+ <class name="CounterGroup" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable"/>
+ <constructor name="CounterGroup" type="java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="CounterGroup" type="java.lang.String, java.lang.String"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the internal name of the group
+ @return the internal name]]>
+ </doc>
+ </method>
+ <method name="getDisplayName" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the display name of the group.
+ @return the human readable name]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapreduce.Counter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="counterName" type="java.lang.String"/>
+ <param name="displayName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Internal helper to find a counter in a group.
+ @param counterName the name of the counter
+ @param displayName the display name of the counter
+ @return the counter that was found or added]]>
+ </doc>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapreduce.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="counterName" type="java.lang.String"/>
+ </method>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="size" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the number of counters in this group.]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericRight" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="incrAllCounters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="rightGroup" type="org.apache.hadoop.mapreduce.CounterGroup"/>
+ </method>
+ <doc>
+ <![CDATA[A group of {@link Counter}s that logically belong together. Typically,
+ it is an {@link Enum} subclass and the counters are the values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.CounterGroup -->
+ <!-- start class org.apache.hadoop.mapreduce.Counters -->
+ <class name="Counters" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <implements name="java.lang.Iterable"/>
+ <constructor name="Counters"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="findCounter" return="org.apache.hadoop.mapreduce.Counter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="groupName" type="java.lang.String"/>
+ <param name="counterName" type="java.lang.String"/>
+ </method>
+ <method name="findCounter" return="org.apache.hadoop.mapreduce.Counter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Enum"/>
+ <doc>
+ <![CDATA[Find the counter for the given enum. The same enum will always return the
+ same counter.
+ @param key the counter key
+ @return the matching counter object]]>
+ </doc>
+ </method>
+ <method name="getGroupNames" return="java.util.Collection"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the names of all counter classes.
+ @return Set of counter names.]]>
+ </doc>
+ </method>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getGroup" return="org.apache.hadoop.mapreduce.CounterGroup"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="groupName" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Returns the named counter group, or an empty group if there is none
+ with the specified name.]]>
+ </doc>
+ </method>
+ <method name="countCounters" return="int"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the total number of counters, by summing the number of counters
+ in each group.]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Write the set of groups.
+ The external format is:
+ #groups (groupName group)*
+
+ i.e. the number of groups followed by 0 or more groups, where each
+ group is of the form:
+
+ groupDisplayName #counters (false | true counter)*
+
+ where each counter is of the form:
+
+ name (false | true displayName) value]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Read a set of groups.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return textual representation of the counter values.]]>
+ </doc>
+ </method>
+ <method name="incrAllCounters"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="other" type="org.apache.hadoop.mapreduce.Counters"/>
+ <doc>
+ <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericRight" type="java.lang.Object"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.Counters -->
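+ <!-- A sketch of reading counters back after a job completes, using only
+ methods listed above: Job.getCounters(), the Iterable views of Counters
+ and CounterGroup, and Counter.getValue().
+
+   import org.apache.hadoop.mapreduce.Counter;
+   import org.apache.hadoop.mapreduce.CounterGroup;
+   import org.apache.hadoop.mapreduce.Counters;
+   import org.apache.hadoop.mapreduce.Job;
+
+   public class DumpCounters {
+     static void dump(Job job) throws Exception {
+       Counters counters = job.getCounters();
+       for (CounterGroup group : counters) {            // groups of counters
+         System.out.println(group.getDisplayName());
+         for (Counter counter : group) {                // counters in a group
+           System.out.println("  " + counter.getDisplayName()
+               + " = " + counter.getValue());
+         }
+       }
+     }
+   }
+ -->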
+ <!-- start class org.apache.hadoop.mapreduce.ID -->
+ <class name="ID" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.WritableComparable"/>
+ <constructor name="ID" type="int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs an ID object from the given int.]]>
+ </doc>
+ </constructor>
+ <constructor name="ID"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getId" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the int that represents the identifier.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="that" type="org.apache.hadoop.mapreduce.ID"/>
+ <doc>
+ <![CDATA[Compare IDs by associated numbers]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="SEPARATOR" type="char"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="id" type="int"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A general identifier, which internally stores the id
+ as an integer. This is the super class of {@link JobID},
+ {@link TaskID} and {@link TaskAttemptID}.
+
+ @see JobID
+ @see TaskID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.ID -->
+ <!-- start class org.apache.hadoop.mapreduce.InputFormat -->
+ <class name="InputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getSplits" return="java.util.List"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Logically split the set of input files for the job.
+
+ <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
+ for processing.</p>
+
+ <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
+ input files are not physically split into chunks. For example, a split could
+ be an <i>&lt;input-file-path, start, offset&gt;</i> tuple. The InputFormat
+ also creates the {@link RecordReader} to read the {@link InputSplit}.
+
+ @param context job configuration.
+ @return a list of {@link InputSplit}s for the job.]]>
+ </doc>
+ </method>
+ <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Create a record reader for a given split. The framework will call
+ {@link RecordReader#initialize(InputSplit, TaskAttemptContext)} before
+ the split is used.
+ @param split the split to be read
+ @param context the information about the task
+ @return a new record reader
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputFormat</code> describes the input-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the input-specification of the job.
+ </li>
+ <li>
+ Split-up the input file(s) into logical {@link InputSplit}s, each of
+ which is then assigned to an individual {@link Mapper}.
+ </li>
+ <li>
+ Provide the {@link RecordReader} implementation to be used to glean
+ input records from the logical <code>InputSplit</code> for processing by
+ the {@link Mapper}.
+ </li>
+ </ol>
+
+ <p>The default behavior of file-based {@link InputFormat}s, typically
+ sub-classes of {@link FileInputFormat}, is to split the
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in
+ bytes, of the input files. However, the {@link FileSystem} blocksize of
+ the input files is treated as an upper bound for input splits. A lower bound
+ on the split size can be set via
+ <a href="{@docRoot}/../mapred-default.html#mapred.min.split.size">
+ mapred.min.split.size</a>.</p>
+
+ <p>Clearly, logical splits based on input size are insufficient for many
+ applications, since record boundaries must be respected. In such cases, the
+ application also has to implement a {@link RecordReader}, which has the
+ responsibility of respecting record boundaries and presenting a record-oriented
+ view of the logical <code>InputSplit</code> to the individual task.</p>
+
+ @see InputSplit
+ @see RecordReader
+ @see FileInputFormat]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.InputFormat -->
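+ <!-- A small file-based InputFormat sketch matching the discussion above: it
+ subclasses FileInputFormat (org.apache.hadoop.mapreduce.lib.input) and
+ disables splitting, so each file becomes one logical InputSplit.
+
+   import org.apache.hadoop.fs.Path;
+   import org.apache.hadoop.io.LongWritable;
+   import org.apache.hadoop.io.Text;
+   import org.apache.hadoop.mapreduce.InputSplit;
+   import org.apache.hadoop.mapreduce.JobContext;
+   import org.apache.hadoop.mapreduce.RecordReader;
+   import org.apache.hadoop.mapreduce.TaskAttemptContext;
+   import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+   import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;
+
+   public class UnsplittableTextInputFormat
+       extends FileInputFormat<LongWritable, Text> {
+
+     protected boolean isSplitable(JobContext context, Path file) {
+       return false;   // one logical split per file, whatever the block size
+     }
+
+     public RecordReader<LongWritable, Text> createRecordReader(
+         InputSplit split, TaskAttemptContext context) {
+       // The framework calls initialize(split, context) before use.
+       return new LineRecordReader();
+     }
+   }
+ -->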
+ <!-- start class org.apache.hadoop.mapreduce.InputSplit -->
+ <class name="InputSplit" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getLength" return="long"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Get the size of the split, so that the input splits can be sorted by size.
+ @return the number of bytes in the split
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Get the list of nodes by name where the data for the split would be local.
+ The locations do not need to be serialized.
+ @return a new array of the node names.
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>InputSplit</code> represents the data to be processed by an
+ individual {@link Mapper}.
+
+ <p>Typically, it presents a byte-oriented view on the input and is the
+ responsibility of {@link RecordReader} of the job to process this and present
+ a record-oriented view.
+
+ @see InputFormat
+ @see RecordReader]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.InputSplit -->
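+ <!-- A minimal concrete InputSplit, illustrating the contract above. Note
+ that, per the getLocations() doc, the locations are only consulted for
+ scheduling and need not be serialized.
+
+   import java.io.DataInput;
+   import java.io.DataOutput;
+   import java.io.IOException;
+   import org.apache.hadoop.io.Writable;
+   import org.apache.hadoop.mapreduce.InputSplit;
+
+   public class WholeFileSplit extends InputSplit implements Writable {
+     private long length;
+     private String[] hosts;
+
+     public WholeFileSplit() {}   // needed for deserialization
+     public WholeFileSplit(long length, String[] hosts) {
+       this.length = length;
+       this.hosts = hosts;
+     }
+     public long getLength() { return length; }
+     public String[] getLocations() {
+       return hosts == null ? new String[0] : hosts;
+     }
+     public void write(DataOutput out) throws IOException {
+       out.writeLong(length);     // hosts intentionally not serialized
+     }
+     public void readFields(DataInput in) throws IOException {
+       length = in.readLong();
+     }
+   }
+ -->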
+ <!-- start class org.apache.hadoop.mapreduce.Job -->
+ <class name="Job" extends="org.apache.hadoop.mapreduce.JobContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Job"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="Job" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <constructor name="Job" type="org.apache.hadoop.conf.Configuration, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="setNumReduceTasks"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="tasks" type="int"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the number of reduce tasks for the job.
+ @param tasks the number of reduce tasks
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setWorkingDirectory"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="dir" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the current working directory for the default file system.
+
+ @param dir the new current working directory.
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setInputFormatClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the {@link InputFormat} for the job.
+ @param cls the <code>InputFormat</code> to use
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setOutputFormatClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the {@link OutputFormat} for the job.
+ @param cls the <code>OutputFormat</code> to use
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setMapperClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the {@link Mapper} for the job.
+ @param cls the <code>Mapper</code> to use
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setJarByClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the Jar by finding where a given class came from.
+ @param cls the example class]]>
+ </doc>
+ </method>
+ <method name="getJar" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the pathname of the job's jar.
+ @return the pathname]]>
+ </doc>
+ </method>
+ <method name="setCombinerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the combiner class for the job.
+ @param cls the combiner to use
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setReducerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the {@link Reducer} for the job.
+ @param cls the <code>Reducer</code> to use
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setPartitionerClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the {@link Partitioner} for the job.
+ @param cls the <code>Partitioner</code> to use
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setMapOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the key class for the map output data. This allows the user to
+ specify the map output key class to be different from the final output
+ key class.
+
+ @param theClass the map output key class.
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setMapOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the value class for the map output data. This allows the user to
+ specify the map output value class to be different than the final output
+ value class.
+
+ @param theClass the map output value class.
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setOutputKeyClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the key class for the job output data.
+
+ @param theClass the key class for the job output data.
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setOutputValueClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="theClass" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the value class for job outputs.
+
+ @param theClass the value class for job outputs.
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setSortComparatorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Define the comparator that controls how the keys are sorted before they
+ are passed to the {@link Reducer}.
+ @param cls the raw comparator
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setGroupingComparatorClass"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="cls" type="java.lang.Class"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Define the comparator that controls which keys are grouped together
+ for a single call to
+ {@link Reducer#reduce(Object, Iterable,
+ org.apache.hadoop.mapreduce.Reducer.Context)}
+ @param cls the raw comparator to use
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="setJobName"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+ <doc>
+ <![CDATA[Set the user-specified job name.
+
+ @param name the job's new name.
+ @throws IllegalStateException if the job is submitted]]>
+ </doc>
+ </method>
+ <method name="getTrackingURL" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the URL where some job progress information will be displayed.
+
+ @return the URL where some job progress information will be displayed.]]>
+ </doc>
+ </method>
+ <method name="mapProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's map-tasks, as a float between 0.0
+ and 1.0. When all map tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's map-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="reduceProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0
+ and 1.0. When all reduce tasks have completed, the function returns 1.0.
+
+ @return the progress of the job's reduce-tasks.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isComplete" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job is finished or not.
+ This is a non-blocking call.
+
+ @return <code>true</code> if the job is complete, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="isSuccessful" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check if the job completed successfully.
+
+ @return <code>true</code> if the job succeeded, else <code>false</code>.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill the running job. Blocks until all job tasks have been
+ killed as well. If the job is no longer running, it simply returns.
+
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="startFrom" type="int"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get events indicating completion (success/failure) of component tasks.
+
+ @param startFrom index to start fetching events from
+ @return an array of {@link TaskCompletionEvent}s
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="killTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Kill indicated task attempt.
+
+ @param taskId the id of the task to be terminated.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="failTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskId" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Fail indicated task attempt.
+
+ @param taskId the id of the task to be terminated.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getCounters" return="org.apache.hadoop.mapreduce.Counters"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Gets the counters for this job.
+
+ @return the counters for this job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="submit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Submit the job to the cluster and return immediately.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="waitForCompletion" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="verbose" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Submit the job to the cluster and wait for it to finish.
+ @param verbose print the progress to the user
+ @return true if the job succeeded
+ @throws IOException thrown if the communication with the
+ <code>JobTracker</code> is lost]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The job submitter's view of the Job. It allows the user to configure the
+ job, submit it, control its execution, and query the state. The set methods
+ only work until the job is submitted; afterwards they will throw an
+ IllegalStateException.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.Job -->
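+ <!-- A typical driver built from the Job methods above; the input and output
+ paths and the WordCountMapper/WordCountReducer classes are placeholders,
+ and FileInputFormat/FileOutputFormat come from the mapreduce.lib packages.
+
+   import org.apache.hadoop.conf.Configuration;
+   import org.apache.hadoop.fs.Path;
+   import org.apache.hadoop.io.IntWritable;
+   import org.apache.hadoop.io.Text;
+   import org.apache.hadoop.mapreduce.Job;
+   import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+   import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+
+   public class WordCountDriver {
+     public static void main(String[] args) throws Exception {
+       Job job = new Job(new Configuration(), "word count");
+       job.setJarByClass(WordCountDriver.class);
+       job.setMapperClass(WordCountMapper.class);      // placeholder class
+       job.setReducerClass(WordCountReducer.class);    // placeholder class
+       job.setOutputKeyClass(Text.class);
+       job.setOutputValueClass(IntWritable.class);
+       FileInputFormat.addInputPath(job, new Path(args[0]));
+       FileOutputFormat.setOutputPath(job, new Path(args[1]));
+       // Configuration must happen before submission; afterwards the set
+       // methods throw IllegalStateException (see the class doc above).
+       System.exit(job.waitForCompletion(true) ? 0 : 1);
+     }
+   }
+ -->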
+ <!-- start class org.apache.hadoop.mapreduce.Job.JobState -->
+ <class name="Job.JobState" extends="java.lang.Enum"
+ abstract="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <method name="values" return="org.apache.hadoop.mapreduce.Job.JobState[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="valueOf" return="org.apache.hadoop.mapreduce.Job.JobState"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <field name="DEFINE" type="org.apache.hadoop.mapreduce.Job.JobState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <field name="RUNNING" type="org.apache.hadoop.mapreduce.Job.JobState"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.Job.JobState -->
+ <!-- start class org.apache.hadoop.mapreduce.JobContext -->
+ <class name="JobContext" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="JobContext" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapreduce.JobID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getConfiguration" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Return the configuration for the job.
+ @return the shared configuration object]]>
+ </doc>
+ </method>
+ <method name="getJobID" return="org.apache.hadoop.mapreduce.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the unique ID for the job.
+ @return the object with the job id]]>
+ </doc>
+ </method>
+ <method name="getNumReduceTasks" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the configured number of reduce tasks for this job. Defaults to
+ <code>1</code>.
+ @return the number of reduce tasks for this job.]]>
+ </doc>
+ </method>
+ <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the current working directory for the default file system.
+
+ @return the directory name.]]>
+ </doc>
+ </method>
+ <method name="getOutputKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the job output data.
+ @return the key class for the job output data.]]>
+ </doc>
+ </method>
+ <method name="getOutputValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for job outputs.
+ @return the value class for job outputs.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputKeyClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the key class for the map output data. If it is not set, use the
+ (final) output key class. This allows the map output key class to be
+ different than the final output key class.
+ @return the map output key class.]]>
+ </doc>
+ </method>
+ <method name="getMapOutputValueClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the value class for the map output data. If it is not set, use the
+ (final) output value class. This allows the map output value class to be
+ different than the final output value class.
+
+ @return the map output value class.]]>
+ </doc>
+ </method>
+ <method name="getJobName" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user-specified job name. This is only used to identify the
+ job to the user.
+
+ @return the job's name, defaulting to "".]]>
+ </doc>
+ </method>
+ <method name="getInputFormatClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Get the {@link InputFormat} class for the job.
+
+ @return the {@link InputFormat} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getMapperClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Get the {@link Mapper} class for the job.
+
+ @return the {@link Mapper} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getCombinerClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Get the combiner class for the job.
+
+ @return the combiner class for the job.]]>
+ </doc>
+ </method>
+ <method name="getReducerClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Get the {@link Reducer} class for the job.
+
+ @return the {@link Reducer} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getOutputFormatClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Get the {@link OutputFormat} class for the job.
+
+ @return the {@link OutputFormat} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getPartitionerClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+ <doc>
+ <![CDATA[Get the {@link Partitioner} class for the job.
+
+ @return the {@link Partitioner} class for the job.]]>
+ </doc>
+ </method>
+ <method name="getSortComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the {@link RawComparator} comparator used to compare keys.
+
+ @return the {@link RawComparator} comparator used to compare keys.]]>
+ </doc>
+ </method>
+ <method name="getJar" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the pathname of the job's jar.
+ @return the pathname]]>
+ </doc>
+ </method>
+ <method name="getGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the user defined {@link RawComparator} comparator for
+ grouping keys of inputs to the reduce.
+
+ @return comparator set by the user for grouping values.
+ @see Job#setGroupingComparatorClass(Class) for details.]]>
+ </doc>
+ </method>
+ <field name="INPUT_FORMAT_CLASS_ATTR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="MAP_CLASS_ATTR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="COMBINE_CLASS_ATTR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="REDUCE_CLASS_ATTR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="OUTPUT_FORMAT_CLASS_ATTR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="PARTITIONER_CLASS_ATTR" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="conf" type="org.apache.hadoop.mapred.JobConf"
+ transient="false" volatile="false"
+ static="false" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A read-only view of the job that is provided to the tasks while they
+ are running.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.JobContext -->
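+ <!-- A sketch of the read-only view in practice: a task pulls job-level
+ settings through JobContext.getConfiguration(). The "wordcount.min.length"
+ key is an arbitrary example, not a Hadoop property.
+
+   import java.io.IOException;
+   import org.apache.hadoop.io.IntWritable;
+   import org.apache.hadoop.io.Text;
+   import org.apache.hadoop.mapreduce.Mapper;
+
+   public class ConfiguredMapper extends Mapper<Object, Text, Text, IntWritable> {
+     private int minLength;
+
+     protected void setup(Context context)
+         throws IOException, InterruptedException {
+       // Context extends JobContext, so the shared Configuration is visible.
+       minLength = context.getConfiguration().getInt("wordcount.min.length", 1);
+     }
+   }
+ -->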
+ <!-- start class org.apache.hadoop.mapreduce.JobID -->
+ <class name="JobID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.lang.Comparable"/>
+ <constructor name="JobID" type="java.lang.String, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a JobID object
+ @param jtIdentifier jobTracker identifier
+ @param id job number]]>
+ </doc>
+ </constructor>
+ <constructor name="JobID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getJtIdentifier" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapreduce.ID"/>
+ <doc>
+ <![CDATA[Compare JobIDs first by jtIdentifier, then by job number.]]>
+ </doc>
+ </method>
+ <method name="appendTo" return="java.lang.StringBuilder"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="builder" type="java.lang.StringBuilder"/>
+ <doc>
+ <![CDATA[Add the portion after the "job" prefix to the given builder. This is
+ useful because the sub-ids use this substring at the start of their string.
+ @param builder the builder to append to
+ @return the builder that was passed in]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapreduce.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a JobID object from a given string.
+ @return constructed JobId object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <field name="JOB" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="idFormat" type="java.text.NumberFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[JobID represents the immutable and unique identifier for
+ the job. A JobID consists of two parts. The first part
+ is the jobtracker identifier, which defines the mapping from
+ jobID to jobtracker. In a cluster setup this string is the jobtracker
+ start time; in the local setting it is "local".
+ The second part of the JobID is the job number. <br>
+ An example JobID is
+ <code>job_200707121733_0003</code>, which represents the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse JobID strings, but rather
+ use appropriate constructors or {@link #forName(String)} method.
+
+ @see TaskID
+ @see TaskAttemptID
+ @see org.apache.hadoop.mapred.JobTracker#getNewJobId()
+ @see org.apache.hadoop.mapred.JobTracker#getStartTime()]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.JobID -->
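+ <!-- Parsing and constructing JobIDs, per the class doc above: forName() for
+ strings, the (jtIdentifier, id) constructor otherwise.
+
+   import org.apache.hadoop.mapreduce.JobID;
+
+   public class JobIdExample {
+     public static void main(String[] args) {
+       JobID parsed = JobID.forName("job_200707121733_0003");
+       JobID built = new JobID("200707121733", 3);
+       // Both identify the third job of the jobtracker started at 200707121733.
+       System.out.println(parsed.equals(built));   // true
+     }
+   }
+ -->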
+ <!-- start class org.apache.hadoop.mapreduce.MapContext -->
+ <class name="MapContext" extends="org.apache.hadoop.mapreduce.TaskInputOutputContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MapContext" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapreduce.TaskAttemptID, org.apache.hadoop.mapreduce.RecordReader, org.apache.hadoop.mapreduce.RecordWriter, org.apache.hadoop.mapreduce.OutputCommitter, org.apache.hadoop.mapreduce.StatusReporter, org.apache.hadoop.mapreduce.InputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getInputSplit" return="org.apache.hadoop.mapreduce.InputSplit"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the input split for this map.]]>
+ </doc>
+ </method>
+ <method name="getCurrentKey" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ <method name="getCurrentValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ <method name="nextKeyValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ <doc>
+ <![CDATA[The context that is given to the {@link Mapper}.
+ @param <KEYIN> the key input type to the Mapper
+ @param <VALUEIN> the value input type to the Mapper
+ @param <KEYOUT> the key output type from the Mapper
+ @param <VALUEOUT> the value output type from the Mapper]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.MapContext -->
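+ <!-- Using MapContext.getInputSplit() from inside a map task; the cast to
+ FileSplit (org.apache.hadoop.mapreduce.lib.input) assumes a file-based
+ input format produced the split.
+
+   import java.io.IOException;
+   import org.apache.hadoop.io.IntWritable;
+   import org.apache.hadoop.io.Text;
+   import org.apache.hadoop.mapreduce.Mapper;
+   import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+
+   public class SplitAwareMapper extends Mapper<Object, Text, Text, IntWritable> {
+     private String fileName;
+
+     protected void setup(Context context)
+         throws IOException, InterruptedException {
+       FileSplit split = (FileSplit) context.getInputSplit();
+       fileName = split.getPath().getName();   // which file this map reads
+     }
+   }
+ -->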
+ <!-- start class org.apache.hadoop.mapreduce.Mapper -->
+ <class name="Mapper" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Mapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Called once at the beginning of the task.]]>
+ </doc>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Called once for each key/value pair in the input split. Most applications
+ should override this, but the default is the identity function.]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Called once at the end of the task.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Expert users can override this method for more complete control over the
+ execution of the Mapper.
+ @param context
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Maps input key/value pairs to a set of intermediate key/value pairs.
+
+ <p>Maps are the individual tasks which transform input records into
+ intermediate records. The transformed intermediate records need not be of
+ the same type as the input records. A given input pair may map to zero or
+ many output pairs.</p>
+
+ <p>The Hadoop Map-Reduce framework spawns one map task for each
+ {@link InputSplit} generated by the {@link InputFormat} for the job.
+ <code>Mapper</code> implementations can access the {@link Configuration} for
+ the job via the {@link JobContext#getConfiguration()}.
+
+ <p>The framework first calls
+ {@link #setup(org.apache.hadoop.mapreduce.Mapper.Context)}, followed by
+ {@link #map(Object, Object, Context)}
+ for each key/value pair in the <code>InputSplit</code>. Finally
+ {@link #cleanup(Context)} is called.</p>
+
+ <p>All intermediate values associated with a given output key are
+ subsequently grouped by the framework, and passed to a {@link Reducer} to
+ determine the final output. Users can control the sorting and grouping by
+ specifying two key {@link RawComparator} classes.</p>
+
+ <p>The <code>Mapper</code> outputs are partitioned per
+ <code>Reducer</code>. Users can control which keys (and hence records) go to
+ which <code>Reducer</code> by implementing a custom {@link Partitioner}.
+
+ <p>Users can optionally specify a <code>combiner</code>, via
+ {@link Job#setCombinerClass(Class)}, to perform local aggregation of the
+ intermediate outputs, which helps to cut down the amount of data transferred
+ from the <code>Mapper</code> to the <code>Reducer</code>.
+
+ <p>Applications can specify if and how the intermediate
+ outputs are to be compressed and which {@link CompressionCodec}s are to be
+ used via the <code>Configuration</code>.</p>
+
+ <p>If the job has zero
+ reduces then the output of the <code>Mapper</code> is directly written
+ to the {@link OutputFormat} without sorting by keys.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class TokenCounterMapper
+     extends Mapper<Object, Text, Text, IntWritable> {
+
+   private final static IntWritable one = new IntWritable(1);
+   private Text word = new Text();
+
+   public void map(Object key, Text value, Context context)
+       throws IOException, InterruptedException {
+     StringTokenizer itr = new StringTokenizer(value.toString());
+     while (itr.hasMoreTokens()) {
+       word.set(itr.nextToken());
+       context.write(word, one);
+     }
+   }
+ }
+ </pre></blockquote></p>
+
+ <p>Applications may override the {@link #run(Context)} method to exert
+ greater control on map processing e.g. multi-threaded <code>Mapper</code>s
+ etc.</p>
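+
+ <p>For reference, the default <code>run</code> implementation is essentially
+ the following loop (a sketch; override it only when you need this level of
+ control):</p>
+ <p><blockquote><pre>
+ public void run(Context context) throws IOException, InterruptedException {
+   setup(context);
+   while (context.nextKeyValue()) {
+     map(context.getCurrentKey(), context.getCurrentValue(), context);
+   }
+   cleanup(context);
+ }
+ </pre></blockquote></p>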
+
+ @see InputFormat
+ @see JobContext
+ @see Partitioner
+ @see Reducer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.Mapper -->
+ <!-- start class org.apache.hadoop.mapreduce.Mapper.Context -->
+ <class name="Mapper.Context" extends="org.apache.hadoop.mapreduce.MapContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Mapper.Context" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapreduce.TaskAttemptID, org.apache.hadoop.mapreduce.RecordReader, org.apache.hadoop.mapreduce.RecordWriter, org.apache.hadoop.mapreduce.OutputCommitter, org.apache.hadoop.mapreduce.StatusReporter, org.apache.hadoop.mapreduce.InputSplit"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.Mapper.Context -->
+ <!-- start class org.apache.hadoop.mapreduce.OutputCommitter -->
+ <class name="OutputCommitter" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputCommitter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setupJob"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For the framework to set up the job output during initialization.
+
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException if temporary output could not be created]]>
+ </doc>
+ </method>
+ <method name="cleanupJob"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[For cleaning up the job's output after job completion.
+
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="setupTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets up output for the task.
+
+ @param taskContext Context of the task whose output is being written.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="needsTaskCommit" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Check whether the task needs a commit.
+
+ @param taskContext Context of the task whose output is being written.
+ @return <code>true</code> if the task needs a commit, <code>false</code> otherwise
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="commitTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[To promote the task's temporary output to the final output location.
+
+ The task's output is moved to the job's output directory.
+
+ @param taskContext Context of the task whose output is being written.
+ @throws IOException if the commit is not successful]]>
+ </doc>
+ </method>
+ <method name="abortTask"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Discard the task output.
+
+ @param taskContext Context of the task whose output is being discarded.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputCommitter</code> describes the commit of task output for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputCommitter</code> of
+ the job to:</p>
+ <ol>
+ <li>
+ Set up the job during initialization. For example, create the temporary
+ output directory for the job.
+ </li>
+ <li>
+ Clean up the job after completion. For example, remove the temporary
+ output directory.
+ </li>
+ <li>
+ Set up the task's temporary output.
+ </li>
+ <li>
+ Check whether a task needs a commit, to avoid the commit
+ procedure for tasks that do not need one.
+ </li>
+ <li>
+ Commit the task output.
+ </li>
+ <li>
+ Discard the task output on abort.
+ </li>
+ </ol>
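+
+ <p>A minimal sketch of a committer with nothing to commit (the class name
+ <code>NoOpOutputCommitter</code> is illustrative, not part of the
+ library):</p>
+ <p><blockquote><pre>
+ public class NoOpOutputCommitter extends OutputCommitter {
+   public void setupJob(JobContext jobContext) { }
+   public void cleanupJob(JobContext jobContext) { }
+   public void setupTask(TaskAttemptContext taskContext) { }
+   public boolean needsTaskCommit(TaskAttemptContext taskContext) {
+     return false;  // nothing to promote, so the commit step is skipped
+   }
+   public void commitTask(TaskAttemptContext taskContext) { }
+   public void abortTask(TaskAttemptContext taskContext) { }
+ }
+ </pre></blockquote></p>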
+
+ @see org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
+ @see JobContext
+ @see TaskAttemptContext]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.OutputCommitter -->
+ <!-- start class org.apache.hadoop.mapreduce.OutputFormat -->
+ <class name="OutputFormat" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="OutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Get the {@link RecordWriter} for the given task.
+
+ @param context the information about the current task.
+ @return a {@link RecordWriter} to write the output for the job.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Check for validity of the output-specification for the job.
+
+ <p>This is called to validate the output specification for the job when it
+ is submitted. Typically it checks that the output does not already exist,
+ throwing an exception when it does, so that output is not
+ overwritten.</p>
+
+ @param context information about the job
+ @throws IOException when output should not be attempted]]>
+ </doc>
+ </method>
+ <method name="getOutputCommitter" return="org.apache.hadoop.mapreduce.OutputCommitter"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Get the output committer for this output format. This is responsible
+ for ensuring the output is committed correctly.
+ @param context the task context
+ @return an output committer
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>OutputFormat</code> describes the output-specification for a
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
+ job to:</p>
+ <ol>
+ <li>
+ Validate the output-specification of the job. For example, check that the
+ output directory doesn't already exist.
+ </li>
+ <li>
+ Provide the {@link RecordWriter} implementation to be used to write out
+ the output files of the job. Output files are stored in a
+ {@link FileSystem}.
+ </li>
+ </ol>
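+
+ <p>A sketch of a trivial <code>OutputFormat</code> that discards its output
+ (the class name and the no-op bodies are illustrative; a real implementation
+ would return a working {@link RecordWriter} and committer):</p>
+ <p><blockquote><pre>
+ public class DiscardingOutputFormat<K, V> extends OutputFormat<K, V> {
+   public void checkOutputSpecs(JobContext context) { }
+   public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context) {
+     return new RecordWriter<K, V>() {
+       public void write(K key, V value) { }             // drop the pair
+       public void close(TaskAttemptContext context) { } // nothing to release
+     };
+   }
+   public OutputCommitter getOutputCommitter(TaskAttemptContext context) {
+     return new NoOpOutputCommitter();  // an illustrative committer with no work to do
+   }
+ }
+ </pre></blockquote></p>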
+
+ @see RecordWriter]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.OutputFormat -->
+ <!-- start class org.apache.hadoop.mapreduce.Partitioner -->
+ <class name="Partitioner" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Partitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPartition" return="int"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="numPartitions" type="int"/>
+ <doc>
+ <![CDATA[Get the partition number for a given key (hence record) given the total
+ number of partitions i.e. number of reduce-tasks for the job.
+
+ <p>Typically a hash function on all or a subset of the key.</p>
+
+ @param key the key to be partitioned.
+ @param value the entry value.
+ @param numPartitions the total number of partitions.
+ @return the partition number for the <code>key</code>.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partitions the key space.
+
+ <p><code>Partitioner</code> controls the partitioning of the keys of the
+ intermediate map-outputs. The key (or a subset of the key) is used to derive
+ the partition, typically by a hash function. The total number of partitions
+ is the same as the number of reduce tasks for the job. Hence this controls
+ which of the <code>m</code> reduce tasks the intermediate key (and hence the
+ record) is sent for reduction.</p>
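+
+ <p>For example, the default hash partitioner is essentially:</p>
+ <p><blockquote><pre>
+ public class HashPartitioner<K, V> extends Partitioner<K, V> {
+   public int getPartition(K key, V value, int numReduceTasks) {
+     // mask off the sign bit so the result is non-negative
+     return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
+   }
+ }
+ </pre></blockquote></p>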
+
+ @see Reducer]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.Partitioner -->
+ <!-- start class org.apache.hadoop.mapreduce.RecordReader -->
+ <class name="RecordReader" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="java.io.Closeable"/>
+ <constructor name="RecordReader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Called once at initialization.
+ @param split the split that defines the range of records to read
+ @param context the information about the task
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="nextKeyValue" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Read the next key, value pair.
+ @return true if a key/value pair was read
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="getCurrentKey" return="java.lang.Object"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Get the current key
+ @return the current key or null if there is no current key
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue" return="java.lang.Object"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Get the current value.
+ @return the object that was read
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="getProgress" return="float"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[The current progress of the record reader through its data.
+ @return a number between 0.0 and 1.0 that is the fraction of the data read
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Close the record reader.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The record reader breaks the data into key/value pairs for input to the
+ {@link Mapper}.
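+
+ <p>A sketch of how a reader is typically driven (the <code>process</code>
+ call stands in for whatever consumes each pair):</p>
+ <p><blockquote><pre>
+ reader.initialize(split, context);
+ while (reader.nextKeyValue()) {
+   process(reader.getCurrentKey(), reader.getCurrentValue());
+ }
+ reader.close();
+ </pre></blockquote></p>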
+ @param <KEYIN>
+ @param <VALUEIN>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.RecordReader -->
+ <!-- start class org.apache.hadoop.mapreduce.RecordWriter -->
+ <class name="RecordWriter" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="RecordWriter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Writes a key/value pair.
+
+ @param key the key to write.
+ @param value the value to write.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Close this <code>RecordWriter</code> to future operations.
+
+ @param context the context of the task
+ @throws IOException]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[<code>RecordWriter</code> writes the output &lt;key, value&gt; pairs
+ to an output file.
+
+ <p><code>RecordWriter</code> implementations write the job outputs to the
+ {@link FileSystem}.
+
+ @see OutputFormat]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.RecordWriter -->
+ <!-- start class org.apache.hadoop.mapreduce.ReduceContext -->
+ <class name="ReduceContext" extends="org.apache.hadoop.mapreduce.TaskInputOutputContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="ReduceContext" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapreduce.TaskAttemptID, org.apache.hadoop.mapred.RawKeyValueIterator, org.apache.hadoop.mapreduce.Counter, org.apache.hadoop.mapreduce.RecordWriter, org.apache.hadoop.mapreduce.OutputCommitter, org.apache.hadoop.mapreduce.StatusReporter, org.apache.hadoop.io.RawComparator, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </constructor>
+ <method name="nextKey" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Start processing the next unique key.]]>
+ </doc>
+ </method>
+ <method name="nextKeyValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Advance to the next key/value pair.]]>
+ </doc>
+ </method>
+ <method name="getCurrentKey" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCurrentValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getValues" return="java.lang.Iterable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Iterate through the values for the current key, reusing the same value
+ object, which is stored in the context.
+ @return the series of values associated with the current key. All of the
+ objects returned directly and indirectly from this method are reused.]]>
+ </doc>
+ </method>
+ <field name="reporter" type="org.apache.hadoop.util.Progressable"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[The context passed to the {@link Reducer}.
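+
+ <p>Note that, as described for <code>getValues()</code>, key and value
+ objects may be reused between calls; a sketch of defensively copying
+ <code>Text</code> values that must outlive the iteration:</p>
+ <p><blockquote><pre>
+ List<Text> kept = new ArrayList<Text>();
+ for (Text value : context.getValues()) {
+   kept.add(new Text(value));  // copy; the framework reuses 'value'
+ }
+ </pre></blockquote></p>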
+ @param <KEYIN> the class of the input keys
+ @param <VALUEIN> the class of the input values
+ @param <KEYOUT> the class of the output keys
+ @param <VALUEOUT> the class of the output values]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.ReduceContext -->
+ <!-- start class org.apache.hadoop.mapreduce.ReduceContext.ValueIterable -->
+ <class name="ReduceContext.ValueIterable" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="java.lang.Iterable"/>
+ <constructor name="ReduceContext.ValueIterable"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="iterator" return="java.util.Iterator"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.ReduceContext.ValueIterable -->
+ <!-- start class org.apache.hadoop.mapreduce.ReduceContext.ValueIterator -->
+ <class name="ReduceContext.ValueIterator" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <implements name="java.util.Iterator"/>
+ <constructor name="ReduceContext.ValueIterator"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="hasNext" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="next" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="remove"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.ReduceContext.ValueIterator -->
+ <!-- start class org.apache.hadoop.mapreduce.Reducer -->
+ <class name="Reducer" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Reducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Called once at the start of the task.]]>
+ </doc>
+ </method>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="values" type="java.lang.Iterable"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[This method is called once for each key. Most applications will define
+ their reduce class by overriding this method. The default implementation
+ is an identity function.]]>
+ </doc>
+ </method>
+ <method name="cleanup"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Called once at the end of the task.]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Advanced application writers can use the
+ {@link #run(org.apache.hadoop.mapreduce.Reducer.Context)} method to
+ control how the reduce task works.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Reduces a set of intermediate values which share a key to a smaller set of
+ values.
+
+ <p><code>Reducer</code> implementations
+ can access the {@link Configuration} for the job via the
+ {@link JobContext#getConfiguration()} method.</p>
+
+ <p><code>Reducer</code> has 3 primary phases:</p>
+ <ol>
+ <li>
+
+ <h4 id="Shuffle">Shuffle</h4>
+
+ <p>The <code>Reducer</code> copies the sorted output from each
+ {@link Mapper} using HTTP across the network.</p>
+ </li>
+
+ <li>
+ <h4 id="Sort">Sort</h4>
+
+ <p>The framework merge sorts <code>Reducer</code> inputs by
+ <code>key</code>s
+ (since different <code>Mapper</code>s may have output the same key).</p>
+
+ <p>The shuffle and sort phases occur simultaneously, i.e. while outputs are
+ being fetched they are merged.</p>
+
+ <h5 id="SecondarySort">SecondarySort</h5>
+
+ <p>To achieve a secondary sort on the values returned by the value
+ iterator, the application should extend the key with the secondary
+ key and define a grouping comparator. The keys will be sorted using the
+ entire key, but will be grouped using the grouping comparator to decide
+ which keys and values are sent in the same call to reduce. The grouping
+ comparator is specified via
+ {@link Job#setGroupingComparatorClass(Class)}. The sort order is
+ controlled by
+ {@link Job#setSortComparatorClass(Class)}.</p>
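+
+ <p>A sketch of wiring this up on the {@link Job} (the three class names
+ are hypothetical application classes):</p>
+ <p><blockquote><pre>
+ job.setPartitionerClass(NaturalKeyPartitioner.class);
+ job.setSortComparatorClass(CompositeKeyComparator.class);
+ job.setGroupingComparatorClass(NaturalKeyGroupingComparator.class);
+ </pre></blockquote></p>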
+
+
+ For example, say that you want to find duplicate web pages and tag them
+ all with the url of the "best" known example. You would set up the job
+ like:
+ <ul>
+ <li>Map Input Key: url</li>
+ <li>Map Input Value: document</li>
+ <li>Map Output Key: document checksum, url pagerank</li>
+ <li>Map Output Value: url</li>
+ <li>Partitioner: by checksum</li>
+ <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
+ <li>OutputValueGroupingComparator: by checksum</li>
+ </ul>
+ </li>
+
+ <li>
+ <h4 id="Reduce">Reduce</h4>
+
+ <p>In this phase the
+ {@link #reduce(Object, Iterable, Context)}
+ method is called for each <code>&lt;key, (collection of values)&gt;</code> in
+ the sorted inputs.</p>
+ <p>The output of the reduce task is typically written to a
+ {@link RecordWriter} via
+ {@link Context#write(Object, Object)}.</p>
+ </li>
+ </ol>
+
+ <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
+
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class IntSumReducer<Key> extends Reducer<Key, IntWritable,
+                                                 Key, IntWritable> {
+   private IntWritable result = new IntWritable();
+
+   public void reduce(Key key, Iterable<IntWritable> values,
+                      Context context)
+       throws IOException, InterruptedException {
+     int sum = 0;
+     for (IntWritable val : values) {
+       sum += val.get();
+     }
+     result.set(sum);
+     context.write(key, result);
+   }
+ }
+ </pre></blockquote></p>
+
+ @see Mapper
+ @see Partitioner]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.Reducer -->
+ <!-- start class org.apache.hadoop.mapreduce.Reducer.Context -->
+ <class name="Reducer.Context" extends="org.apache.hadoop.mapreduce.ReduceContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Reducer.Context" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapreduce.TaskAttemptID, org.apache.hadoop.mapred.RawKeyValueIterator, org.apache.hadoop.mapreduce.Counter, org.apache.hadoop.mapreduce.RecordWriter, org.apache.hadoop.mapreduce.OutputCommitter, org.apache.hadoop.mapreduce.StatusReporter, org.apache.hadoop.io.RawComparator, java.lang.Class, java.lang.Class"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </constructor>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.Reducer.Context -->
+ <!-- start class org.apache.hadoop.mapreduce.StatusReporter -->
+ <class name="StatusReporter" extends="java.lang.Object"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="StatusReporter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getCounter" return="org.apache.hadoop.mapreduce.Counter"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="name" type="java.lang.Enum"/>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapreduce.Counter"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="group" type="java.lang.String"/>
+ <param name="name" type="java.lang.String"/>
+ </method>
+ <method name="progress"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setStatus"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.StatusReporter -->
+ <!-- start class org.apache.hadoop.mapreduce.TaskAttemptContext -->
+ <class name="TaskAttemptContext" extends="org.apache.hadoop.mapreduce.JobContext"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Progressable"/>
+ <constructor name="TaskAttemptContext" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapreduce.TaskAttemptID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getTaskAttemptID" return="org.apache.hadoop.mapreduce.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the unique name for this task attempt.]]>
+ </doc>
+ </method>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="msg" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the current status of the task to the given string.]]>
+ </doc>
+ </method>
+ <method name="getStatus" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the last set status message.
+ @return the current status message]]>
+ </doc>
+ </method>
+ <method name="progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Report progress. The subtypes actually do work in this method.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[The context for task attempts.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.TaskAttemptContext -->
+ <!-- start class org.apache.hadoop.mapreduce.TaskAttemptID -->
+ <class name="TaskAttemptID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskAttemptID" type="org.apache.hadoop.mapreduce.TaskID, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from given {@link TaskID}.
+ @param taskId TaskID that this task belongs to
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskAttemptID" type="java.lang.String, int, boolean, int, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskAttemptID object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param taskId taskId number
+ @param id the task attempt number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskAttemptID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapreduce.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="getTaskID" return="org.apache.hadoop.mapreduce.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link TaskID} object that this task attempt belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskAttemptID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="appendTo" return="java.lang.StringBuilder"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="builder" type="java.lang.StringBuilder"/>
+ <doc>
+ <![CDATA[Add the unique string to the given builder.
+ @param builder the builder to append to
+ @return the builder that was passed in.]]>
+ </doc>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapreduce.ID"/>
+ <doc>
+ <![CDATA[Compare TaskAttemptIDs first by their TaskIDs, then by attempt numbers.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapreduce.TaskAttemptID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a TaskAttemptID object from given string
+ @return constructed TaskAttemptID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <field name="ATTEMPT" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[TaskAttemptID represents the immutable and unique identifier for
+ a task attempt. Each task attempt is one particular instance of a Map or
+ Reduce Task identified by its TaskID.
+
+ TaskAttemptID consists of 2 parts. First part is the
+ {@link TaskID}, that this TaskAttemptID belongs to.
+ Second part is the task attempt number. <br>
+ An example TaskAttemptID is:
+ <code>attempt_200707121733_0003_m_000005_0</code>, which represents the
+ zeroth task attempt for the fifth map task in the third job
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskAttemptID strings,
+ but rather use the appropriate constructors or the {@link #forName(String)}
+ method.
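+
+ <p>For example, parsing the attempt id shown above (a sketch):</p>
+ <p><blockquote><pre>
+ TaskAttemptID id =
+   TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
+ </pre></blockquote></p>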
+
+ @see JobID
+ @see TaskID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.TaskAttemptID -->
+ <!-- start class org.apache.hadoop.mapreduce.TaskID -->
+ <class name="TaskID" extends="org.apache.hadoop.mapred.ID"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TaskID" type="org.apache.hadoop.mapreduce.JobID, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from given {@link JobID}.
+ @param jobId JobID that this tip belongs to
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskID" type="java.lang.String, int, boolean, int"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a TaskID object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+ </doc>
+ </constructor>
+ <constructor name="TaskID"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getJobID" return="org.apache.hadoop.mapreduce.JobID"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns the {@link JobID} object that this tip belongs to]]>
+ </doc>
+ </method>
+ <method name="isMap" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Returns whether this TaskID is a map ID]]>
+ </doc>
+ </method>
+ <method name="equals" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="java.lang.Object"/>
+ </method>
+ <method name="compareTo" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="o" type="org.apache.hadoop.mapreduce.ID"/>
+ <doc>
+ <![CDATA[Compare TaskIDs first by jobIds, then by tip numbers. Reduces are
+ defined as greater than maps.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="appendTo" return="java.lang.StringBuilder"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="builder" type="java.lang.StringBuilder"/>
+ <doc>
+ <![CDATA[Add the unique string to the given builder.
+ @param builder the builder to append to
+ @return the builder that was passed in]]>
+ </doc>
+ </method>
+ <method name="hashCode" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="forName" return="org.apache.hadoop.mapreduce.TaskID"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="str" type="java.lang.String"/>
+ <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+ <doc>
+ <![CDATA[Construct a TaskID object from given string
+ @return constructed TaskID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+ </doc>
+ </method>
+ <field name="TASK" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <field name="idFormat" type="java.text.NumberFormat"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[TaskID represents the immutable and unique identifier for
+ a Map or Reduce Task. Each TaskID encompasses multiple attempts made to
+ execute the Map or Reduce Task, each of which is uniquely identified by
+ its TaskAttemptID.
+
+ TaskID consists of 3 parts. First part is the {@link JobID}, that this
+ TaskInProgress belongs to. Second part of the TaskID is either 'm' or 'r'
+ representing whether the task is a map task or a reduce task.
+ And the third part is the task number. <br>
+ An example TaskID is:
+ <code>task_200707121733_0003_m_000005</code>, which represents the
+ fifth map task in the third job running at the jobtracker
+ started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskID strings,
+ but rather use the appropriate constructors or the {@link #forName(String)}
+ method.
+
+ @see JobID
+ @see TaskAttemptID]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.TaskID -->
+ <!-- start class org.apache.hadoop.mapreduce.TaskInputOutputContext -->
+ <class name="TaskInputOutputContext" extends="org.apache.hadoop.mapreduce.TaskAttemptContext"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Progressable"/>
+ <constructor name="TaskInputOutputContext" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapreduce.TaskAttemptID, org.apache.hadoop.mapreduce.RecordWriter, org.apache.hadoop.mapreduce.OutputCommitter, org.apache.hadoop.mapreduce.StatusReporter"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="nextKeyValue" return="boolean"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Advance to the next key/value pair.
+ @return true if a key/value pair was read, false if at end of input]]>
+ </doc>
+ </method>
+ <method name="getCurrentKey" return="java.lang.Object"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Get the current key.
+ @return the current key object or null if there isn't one
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="getCurrentValue" return="java.lang.Object"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Get the current value.
+ @return the value object that was read into
+ @throws IOException
+ @throws InterruptedException]]>
+ </doc>
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Generate an output key/value pair.]]>
+ </doc>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapreduce.Counter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="counterName" type="java.lang.Enum"/>
+ </method>
+ <method name="getCounter" return="org.apache.hadoop.mapreduce.Counter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="groupName" type="java.lang.String"/>
+ <param name="counterName" type="java.lang.String"/>
+ </method>
+ <method name="progress"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="setStatus"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="status" type="java.lang.String"/>
+ </method>
+ <method name="getOutputCommitter" return="org.apache.hadoop.mapreduce.OutputCommitter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <doc>
+ <![CDATA[A context object that allows input and output from the task. It is only
+ supplied to the {@link Mapper} or {@link Reducer}.
+ @param <KEYIN> the input key type for the task
+ @param <VALUEIN> the input value type for the task
+ @param <KEYOUT> the output key type for the task
+ @param <VALUEOUT> the output value type for the task]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.TaskInputOutputContext -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.input">
+ <!-- start class org.apache.hadoop.mapreduce.lib.input.FileInputFormat -->
+ <class name="FileInputFormat" extends="org.apache.hadoop.mapreduce.InputFormat"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getFormatMinSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the lower bound on split size imposed by the format.
+ @return the number of bytes of the minimal split for this format]]>
+ </doc>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <param name="filename" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Is the given filename splitable? Usually true, but if the file is
+ stream compressed, it will not be.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split-up
+ so that {@link Mapper}s process entire files.
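+
+ <p>A sketch of such an override inside a <code>FileInputFormat</code>
+ subclass:</p>
+ <p><blockquote><pre>
+ protected boolean isSplitable(JobContext context, Path filename) {
+   return false;  // each input file goes to exactly one Mapper
+ }
+ </pre></blockquote></p>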
+
+ @param context the job context
+ @param filename the file name to check
+ @return whether this file is splitable]]>
+ </doc>
+ </method>
+ <method name="setInputPathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="filter" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+ @param job the job to modify
+ @param filter the PathFilter class to use for filtering the input paths.]]>
+ </doc>
+ </method>
+ <method name="setMinInputSplitSize"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Set the minimum input split size
+ @param job the job to modify
+ @param size the minimum size]]>
+ </doc>
+ </method>
+ <method name="getMinSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <doc>
+ <![CDATA[Get the minimum split size
+ @param job the job
+ @return the minimum number of bytes that can be in a split]]>
+ </doc>
+ </method>
+ <method name="setMaxInputSplitSize"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="size" type="long"/>
+ <doc>
+ <![CDATA[Set the maximum split size
+ @param job the job to modify
+ @param size the maximum split size]]>
+ </doc>
+ </method>
+ <method name="getMaxSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <doc>
+ <![CDATA[Get the maximum split size.
+ @param context the job to look at.
+ @return the maximum number of bytes a split can include]]>
+ </doc>
+ </method>
+ <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <doc>
+ <![CDATA[Get a PathFilter instance of the filter set for the input paths.
+
+ @return the PathFilter instance set for the job, or null if none has been set.]]>
+ </doc>
+ </method>
+ <method name="listStatus" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression.
+
+ @param job the job to list input paths for
+ @return a list of FileStatus objects
+ @throws IOException if the list of input paths is empty.]]>
+ </doc>
+ </method>
+ <method name="getSplits" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Generate the list of files and make them into FileSplits.]]>
+ </doc>
+ </method>
+ <method name="computeSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="blockSize" type="long"/>
+ <param name="minSize" type="long"/>
+ <param name="maxSize" type="long"/>
+ </method>
+ <method name="getBlockIndex" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+ <param name="offset" type="long"/>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Sets the given comma separated paths as the list of inputs
+ for the map-reduce job.
+
+ @param job the job
+ @param commaSeparatedPaths Comma separated paths to be set as
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="commaSeparatedPaths" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add the given comma separated paths to the list of inputs for
+ the map-reduce job.
+
+ @param job The job to modify
+ @param commaSeparatedPaths Comma separated paths to be added to
+ the list of inputs for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="setInputPaths"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+
+ @param job The job to modify
+ @param inputPaths the {@link Path}s of the input directories/files
+ for the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="addInputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="path" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+
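+ <p>Typical driver-side usage (the path is illustrative):</p>
+ <p><blockquote><pre>
+ FileInputFormat.addInputPath(job, new Path("/data/input"));
+ </pre></blockquote></p>
+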
+ @param job The {@link Job} to modify
+ @param path {@link Path} to be added to the list of inputs for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <doc>
+ <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+
+ @param context The job
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A base class for file-based {@link InputFormat}s.
+
+ <p><code>FileInputFormat</code> is the base class for all file-based
+ <code>InputFormat</code>s. This provides a generic implementation of
+ {@link #getSplits(JobContext)}.
+ Subclasses of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(JobContext, Path)} method to ensure input-files are
+ not split-up and are processed as a whole by {@link Mapper}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.input.FileInputFormat -->
+ <!-- start class org.apache.hadoop.mapreduce.lib.input.FileSplit -->
+ <class name="FileSplit" extends="org.apache.hadoop.mapreduce.InputSplit"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.io.Writable"/>
+ <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+ </doc>
+ </constructor>
+ <method name="getPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The file containing this split's data.]]>
+ </doc>
+ </method>
+ <method name="getStart" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The position of the first byte in the file to process.]]>
+ </doc>
+ </method>
+ <method name="getLength" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[The number of bytes in the file to process.]]>
+ </doc>
+ </method>
+ <method name="toString" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="write"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="out" type="java.io.DataOutput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="readFields"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="in" type="java.io.DataInput"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getLocations" return="java.lang.String[]"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A section of an input file. Returned by {@link
+ InputFormat#getSplits(JobContext)} and passed to
+ {@link InputFormat#createRecordReader(InputSplit,TaskAttemptContext)}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.input.FileSplit -->
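+  <!-- An illustrative construction of a split covering the first 64MB of a
+       file; the path and host names are made up:
+         FileSplit split = new FileSplit(new Path("/data/input.txt"),
+                                         0L, 64L * 1024 * 1024,
+                                         new String[] {"host1", "host2"});
+  -->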
+ <!-- start class org.apache.hadoop.mapreduce.lib.input.InvalidInputException -->
+ <class name="InvalidInputException" extends="java.io.IOException"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InvalidInputException" type="java.util.List"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Create the exception with the given list.
+ @param probs the list of problems to report. This list is not copied.]]>
+ </doc>
+ </constructor>
+ <method name="getProblems" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the complete list of the problems reported.
+ @return the list of problems, which must not be modified]]>
+ </doc>
+ </method>
+ <method name="getMessage" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get a summary message of the problems found.
+ @return the concatenated messages from all of the problems.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[This class wraps a list of problems with the input, so that the user
+ can get a list of problems together instead of finding and fixing them one
+ by one.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.input.InvalidInputException -->
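+  <!-- A sketch of surfacing every input problem at once instead of one by
+       one; the catch site is hypothetical:
+         try {
+           // ... submit the job ...
+         } catch (InvalidInputException e) {
+           for (Object prob : e.getProblems()) {
+             System.err.println(prob);  // each entry is one input problem
+           }
+         }
+  -->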
+ <!-- start class org.apache.hadoop.mapreduce.lib.input.LineRecordReader -->
+ <class name="LineRecordReader" extends="org.apache.hadoop.mapreduce.RecordReader"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LineRecordReader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="genericSplit" type="org.apache.hadoop.mapreduce.InputSplit"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="nextKeyValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getCurrentKey" return="org.apache.hadoop.io.LongWritable"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCurrentValue" return="org.apache.hadoop.io.Text"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Get the progress within the split]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[Treats keys as offsets in the file and values as lines.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.input.LineRecordReader -->
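+  <!-- A sketch of driving the reader by hand, mirroring what the framework
+       does; split and context normally come from the framework:
+         LineRecordReader reader = new LineRecordReader();
+         reader.initialize(split, context);
+         while (reader.nextKeyValue()) {
+           LongWritable offset = reader.getCurrentKey();
+           Text line = reader.getCurrentValue();
+           // ... consume (offset, line) ...
+         }
+         reader.close();
+  -->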
+ <!-- start class org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat -->
+ <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.FileInputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="getFormatMinSplitSize" return="long"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </method>
+ <method name="listStatus" return="java.util.List"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat -->
+ <!-- start class org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader -->
+ <class name="SequenceFileRecordReader" extends="org.apache.hadoop.mapreduce.RecordReader"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileRecordReader"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="initialize"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ <method name="nextKeyValue" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ <method name="getCurrentKey" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getCurrentValue" return="java.lang.Object"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="getProgress" return="float"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+ </doc>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="conf" type="org.apache.hadoop.conf.Configuration"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A {@link RecordReader} for {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader -->
+ <!-- start class org.apache.hadoop.mapreduce.lib.input.TextInputFormat -->
+ <class name="TextInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.FileInputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TextInputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ </method>
+ <method name="isSplitable" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <param name="file" type="org.apache.hadoop.fs.Path"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return is used to signal end of line. Keys are
+ the position in the file, and values are the line of text.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.input.TextInputFormat -->
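+  <!-- Selecting this format on a job via the new-API hook; the job setup is
+       illustrative:
+         job.setInputFormatClass(TextInputFormat.class);
+         // keys arrive as LongWritable file offsets, values as Text lines
+  -->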
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.map">
+ <!-- start class org.apache.hadoop.mapreduce.lib.map.InverseMapper -->
+ <class name="InverseMapper" extends="org.apache.hadoop.mapreduce.Mapper"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="InverseMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[The inverse function. Input keys and values are swapped.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that swaps keys and values.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.map.InverseMapper -->
+ <!-- start class org.apache.hadoop.mapreduce.lib.map.MultithreadedMapper -->
+ <class name="MultithreadedMapper" extends="org.apache.hadoop.mapreduce.Mapper"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="MultithreadedMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getNumberOfThreads" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <doc>
+ <![CDATA[The number of threads in the thread pool that will run the map function.
+ @param job the job
+ @return the number of threads]]>
+ </doc>
+ </method>
+ <method name="setNumberOfThreads"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="threads" type="int"/>
+ <doc>
+ <![CDATA[Set the number of threads in the pool for running maps.
+ @param job the job to modify
+ @param threads the new number of threads]]>
+ </doc>
+ </method>
+ <method name="getMapperClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <doc>
+ <![CDATA[Get the application's mapper class.
+ @param <K1> the map's input key type
+ @param <V1> the map's input value type
+ @param <K2> the map's output key type
+ @param <V2> the map's output value type
+ @param job the job
+ @return the mapper class to run]]>
+ </doc>
+ </method>
+ <method name="setMapperClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="cls" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the application's mapper class.
+ @param <K1> the map input key type
+ @param <V1> the map input value type
+ @param <K2> the map output key type
+ @param <V2> the map output value type
+ @param job the job to modify
+ @param cls the class to use as the mapper]]>
+ </doc>
+ </method>
+ <method name="run"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Run the application's maps using a thread pool.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Multithreaded implementation of {@link org.apache.hadoop.mapreduce.Mapper}.
+ <p>
+ It can be used instead of the default implementation,
+ {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not CPU
+ bound, in order to improve throughput.
+ <p>
+ Mapper implementations using this MapRunnable must be thread-safe.
+ <p>
+ The Map-Reduce job has to be configured with the mapper to use via
+ {@link #setMapperClass(Job, Class)} and
+ the number of threads the thread-pool can use with the
+ {@link #setNumberOfThreads(Job, int)} method. The default
+ value is 10 threads.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.map.MultithreadedMapper -->
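+  <!-- A sketch of wiring a thread-safe mapper through the multithreaded
+       runner; WordMapper is a hypothetical, thread-safe Mapper subclass:
+         job.setMapperClass(MultithreadedMapper.class);
+         MultithreadedMapper.setMapperClass(job, WordMapper.class);
+         MultithreadedMapper.setNumberOfThreads(job, 8);  // default is 10
+  -->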
+ <!-- start class org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper -->
+ <class name="TokenCounterMapper" extends="org.apache.hadoop.mapreduce.Mapper"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TokenCounterMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ <doc>
+ <![CDATA[Tokenize the input values and emit each word with a count of 1.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.output">
+ <!-- start class org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter -->
+ <class name="FileOutputCommitter" extends="org.apache.hadoop.mapreduce.OutputCommitter"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileOutputCommitter" type="org.apache.hadoop.fs.Path, org.apache.hadoop.mapreduce.TaskAttemptContext"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create a file output committer
+ @param outputPath the job's output path
+ @param context the task's context
+ @throws IOException]]>
+ </doc>
+ </constructor>
+ <method name="setupJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Create the temporary directory that is the root of all of the task
+ work directories.
+ @param context the job's context]]>
+ </doc>
+ </method>
+ <method name="cleanupJob"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Delete the temporary directory, including all of the work directories.
+ @param context the job's context]]>
+ </doc>
+ </method>
+ <method name="setupTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[No task setup required.]]>
+ </doc>
+ </method>
+ <method name="commitTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Move the files from the work directory to the job output directory
+ @param context the task context]]>
+ </doc>
+ </method>
+ <method name="abortTask"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <doc>
+ <![CDATA[Delete the work directory]]>
+ </doc>
+ </method>
+ <method name="needsTaskCommit" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Did this task write any files in the work directory?
+ @param context the task's context]]>
+ </doc>
+ </method>
+ <method name="getWorkPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the directory that the task should write results into
+ @return the work directory
+ @throws IOException]]>
+ </doc>
+ </method>
+ <field name="TEMP_DIR_NAME" type="java.lang.String"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="protected"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Temporary directory name]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An {@link OutputCommitter} that commits files to the
+ job output directory, i.e. ${mapred.output.dir}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter -->
+ <!-- start class org.apache.hadoop.mapreduce.lib.output.FileOutputFormat -->
+ <class name="FileOutputFormat" extends="org.apache.hadoop.mapreduce.OutputFormat"
+ abstract="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="FileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setCompressOutput"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="compress" type="boolean"/>
+ <doc>
+ <![CDATA[Set whether the output of the job is compressed.
+ @param job the job to modify
+ @param compress should the output of the job be compressed?]]>
+ </doc>
+ </method>
+ <method name="getCompressOutput" return="boolean"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <doc>
+ <![CDATA[Is the job output compressed?
+ @param job the Job to look in
+ @return <code>true</code> if the job output should be compressed,
+ <code>false</code> otherwise]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressorClass"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="codecClass" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param job the job to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+ compress the job outputs]]>
+ </doc>
+ </method>
+ <method name="getOutputCompressorClass" return="java.lang.Class"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <param name="defaultValue" type="java.lang.Class"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param job the {@link Job} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the
+ job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+ </doc>
+ </method>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+ abstract="true" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="setOutputPath"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+ <doc>
+ <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param job The job to modify
+ @param outputDir the {@link Path} of the output directory for
+ the map-reduce job.]]>
+ </doc>
+ </method>
+ <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(TaskInputOutputContext)]]>
+ </doc>
+ </method>
+ <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskInputOutputContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Get the {@link Path} to the task's temporary output directory
+ for the map-reduce job
+
+ <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
+
+ <p>Some applications need to create/write-to side-files, which differ from
+ the actual job-outputs.
+
+ <p>In such cases there could be issues with 2 instances of the same TIP
+ (running simultaneously e.g. speculative tasks) trying to open/write-to the
+ same file (path) on HDFS. Hence the application-writer will have to pick
+ unique names per task-attempt (e.g. using the attemptid, say
+ <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
+
+ <p>To get around this the Map-Reduce framework helps the application-writer
+ out by maintaining a special
+ <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>
+ sub-directory for each task-attempt on HDFS where the output of the
+ task-attempt goes. On successful completion of the task-attempt the files
+ in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only)
+ are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the
+ framework discards the sub-directory of unsuccessful task-attempts. This
+ is completely transparent to the application.</p>
+
+ <p>The application-writer can take advantage of this by creating any
+ side-files required in a work directory during execution
+ of a task, i.e. via
+ {@link #getWorkOutputPath(TaskInputOutputContext)}, and
+ the framework will move them out similarly - thus the writer doesn't
+ have to pick unique paths per task-attempt.</p>
+
+ <p>The entire discussion holds true for maps of jobs with
+ reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
+ goes directly to HDFS.</p>
+
+ @return the {@link Path} to the task's temporary output directory
+ for the map-reduce job.]]>
+ </doc>
+ </method>
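+  <!-- A sketch of creating a side-effect file in the task's work directory
+       so the framework promotes it on commit; the file name is illustrative:
+         Path workDir = FileOutputFormat.getWorkOutputPath(context);
+         FileSystem fs = workDir.getFileSystem(context.getConfiguration());
+         FSDataOutputStream side = fs.create(new Path(workDir, "side-file"));
+         // ... write side data ...
+         side.close();
+  -->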
+ <method name="getPathForWorkFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskInputOutputContext"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="extension" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ <doc>
+ <![CDATA[Helper function to generate a {@link Path} for a file that is unique for
+ the task within the job output directory.
+
+ <p>The path can be used to create custom files from within the map and
+ reduce tasks. The path name will be unique for each task. The path parent
+ will be the job output directory.</p>
+
+ <p>This method uses the {@link #getUniqueFile} method to make the file name
+ unique for the task.</p>
+
+ @param context the context for the task.
+ @param name the name for the file.
+ @param extension the extension for the file
+ @return a unique path across all tasks of the job.]]>
+ </doc>
+ </method>
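+  <!-- A sketch of naming a custom per-task file with this helper; the base
+       name and extension are illustrative:
+         Path p = FileOutputFormat.getPathForWorkFile(context, "histogram", ".bin");
+  -->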
+ <method name="getUniqueFile" return="java.lang.String"
+ abstract="false" native="false" synchronized="true"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <param name="name" type="java.lang.String"/>
+ <param name="extension" type="java.lang.String"/>
+ <doc>
+ <![CDATA[Generate a unique filename, based on the task id, name, and extension
+ @param context the task that is calling this
+ @param name the base filename
+ @param extension the filename extension
+ @return a string like $name-[mr]-$id$extension]]>
+ </doc>
+ </method>
+ <method name="getDefaultWorkFile" return="org.apache.hadoop.fs.Path"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <param name="extension" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Get the default path and filename for the output format.
+ @param context the task context
+ @param extension an extension to add to the filename
+ @return a full path $output/_temporary/$taskid/part-[mr]-$id
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="getOutputCommitter" return="org.apache.hadoop.mapreduce.OutputCommitter"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A base class for {@link OutputFormat}s that write to {@link FileSystem}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.output.FileOutputFormat -->
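+  <!-- Enabling compressed output via the static helpers documented above;
+       GzipCodec is one stock codec and the output path is illustrative:
+         FileOutputFormat.setOutputPath(job, new Path("/out"));
+         FileOutputFormat.setCompressOutput(job, true);
+         FileOutputFormat.setOutputCompressorClass(job,
+             org.apache.hadoop.io.compress.GzipCodec.class);
+  -->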
+ <!-- start class org.apache.hadoop.mapreduce.lib.output.NullOutputFormat -->
+ <class name="NullOutputFormat" extends="org.apache.hadoop.mapreduce.OutputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="NullOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ </method>
+ <method name="checkOutputSpecs"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+ </method>
+ <method name="getOutputCommitter" return="org.apache.hadoop.mapreduce.OutputCommitter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ </method>
+ <doc>
+ <![CDATA[Consume all outputs and put them in /dev/null.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.output.NullOutputFormat -->
+ <!-- start class org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat -->
+ <class name="SequenceFileOutputFormat" extends="org.apache.hadoop.mapreduce.lib.output.FileOutputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="SequenceFileOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ <method name="getOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+ <doc>
+ <![CDATA[Get the {@link CompressionType} for the output {@link SequenceFile}.
+ @param job the {@link Job}
+ @return the {@link CompressionType} for the output {@link SequenceFile},
+ defaulting to {@link CompressionType#RECORD}]]>
+ </doc>
+ </method>
+ <method name="setOutputCompressionType"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+ <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+ <doc>
+ <![CDATA[Set the {@link CompressionType} for the output {@link SequenceFile}.
+ @param job the {@link Job} to modify
+ @param style the {@link CompressionType} for the output
+ {@link SequenceFile}]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes {@link SequenceFile}s.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat -->
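+  <!-- Choosing block compression for SequenceFile output; RECORD is the
+       documented default, and BLOCK generally compresses small records better:
+         job.setOutputFormatClass(SequenceFileOutputFormat.class);
+         SequenceFileOutputFormat.setOutputCompressionType(job,
+             SequenceFile.CompressionType.BLOCK);
+  -->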
+ <!-- start class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat -->
+ <class name="TextOutputFormat" extends="org.apache.hadoop.mapreduce.lib.output.FileOutputFormat"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="TextOutputFormat"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ <doc>
+ <![CDATA[An {@link OutputFormat} that writes plain text files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat -->
+ <!-- start class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat.LineRecordWriter -->
+ <class name="TextOutputFormat.LineRecordWriter" extends="org.apache.hadoop.mapreduce.RecordWriter"
+ abstract="false"
+ static="true" final="false" visibility="protected"
+ deprecated="not deprecated">
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream, java.lang.String"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="write"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="close"
+ abstract="false" native="false" synchronized="true"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <field name="out" type="java.io.DataOutputStream"
+ transient="false" volatile="false"
+ static="false" final="false" visibility="protected"
+ deprecated="not deprecated">
+ </field>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat.LineRecordWriter -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.partition">
+ <!-- start class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner -->
+ <class name="HashPartitioner" extends="org.apache.hadoop.mapreduce.Partitioner"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="HashPartitioner"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="getPartition" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="value" type="java.lang.Object"/>
+ <param name="numReduceTasks" type="int"/>
+ <doc>
+ <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[Partition keys by their {@link Object#hashCode()}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner -->
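+  <!-- The partition is conventionally derived from the key's hashCode; a
+       sketch of the usual computation (masking keeps the index non-negative):
+         int partition = (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
+  -->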
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.reduce">
+ <!-- start class org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer -->
+ <class name="IntSumReducer" extends="org.apache.hadoop.mapreduce.Reducer"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="IntSumReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="values" type="java.lang.Iterable"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer -->
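+  <!-- Paired with TokenCounterMapper above, this reducer yields a classic
+       word count; the job wiring is illustrative:
+         job.setMapperClass(TokenCounterMapper.class);
+         job.setCombinerClass(IntSumReducer.class);
+         job.setReducerClass(IntSumReducer.class);
+         job.setOutputKeyClass(Text.class);
+         job.setOutputValueClass(IntWritable.class);
+  -->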
+ <!-- start class org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer -->
+ <class name="LongSumReducer" extends="org.apache.hadoop.mapreduce.Reducer"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="LongSumReducer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="reduce"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="java.lang.Object"/>
+ <param name="values" type="java.lang.Iterable"/>
+ <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+ </method>
+ </class>
+ <!-- end class org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer -->
+</package>
+<package name="org.apache.hadoop.tools">
+ <!-- start class org.apache.hadoop.tools.DistCh -->
+ <class name="DistCh" extends="org.apache.hadoop.tools.DistTool"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[This is the main driver for recursively changing file properties.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <doc>
+ <![CDATA[A Map-reduce program to recursively change file properties
+ such as owner, group and permission.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCh -->
+ <!-- start class org.apache.hadoop.tools.DistCp -->
+ <class name="DistCp" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="DistCp" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="copy"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ <param name="srcPath" type="java.lang.String"/>
+ <param name="destPath" type="java.lang.String"/>
+ <param name="logPath" type="org.apache.hadoop.fs.Path"/>
+ <param name="srcAsList" type="boolean"/>
+ <param name="ignoreReadFailures" type="boolean"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[This is the main driver for recursively copying directories
+ across file systems. It takes at least two command-line parameters: a source
+ URL and a destination URL. It then essentially does an "ls -lR" on the
+ source URL, and writes the output in a round-robin manner to all the map
+ input files. The mapper actually copies the files allotted to it. The
+ reduce is empty.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ </method>
+ <method name="getRandomId" return="java.lang.String"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <field name="LOG" type="org.apache.commons.logging.Log"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ </field>
+ <doc>
+ <![CDATA[A Map-reduce program to recursively copy directories between
+ different file-systems.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp -->
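+  <!-- DistCp implements Tool, so ToolRunner is the usual programmatic entry
+       point; the URIs are illustrative:
+         int rc = ToolRunner.run(new DistCp(new Configuration()),
+             new String[] {"hdfs://nn1:8020/src", "hdfs://nn2:8020/dst"});
+  -->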
+ <!-- start class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <class name="DistCp.DuplicationException" extends="java.io.IOException"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <field name="ERROR_CODE" type="int"
+ transient="false" volatile="false"
+ static="true" final="true" visibility="public"
+ deprecated="not deprecated">
+ <doc>
+ <![CDATA[Error code for this exception]]>
+ </doc>
+ </field>
+ <doc>
+ <![CDATA[An exception class for duplicated source files.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.DistCp.DuplicationException -->
+ <!-- start class org.apache.hadoop.tools.HadoopArchives -->
+ <class name="HadoopArchives" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.util.Tool"/>
+ <constructor name="HadoopArchives" type="org.apache.hadoop.conf.Configuration"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="archive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="srcPaths" type="java.util.List"/>
+ <param name="archiveName" type="java.lang.String"/>
+ <param name="dest" type="org.apache.hadoop.fs.Path"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[Archive the given source paths into the dest.
+ @param srcPaths the src paths to be archived
+ @param archiveName the name of the archive to create
+ @param dest the dest dir that will contain the archive]]>
+ </doc>
+ </method>
+ <method name="run" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <exception name="Exception" type="java.lang.Exception"/>
+ <doc>
+ <![CDATA[The main driver for creating the archives.
+ It takes at least two command line parameters: the src and the
+ dest. It does an lsr on the source paths.
+ The mapper creates archives and the reducer creates
+ the archive index.]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ <doc>
+ <![CDATA[The main function.]]>
+ </doc>
+ </method>
+ <doc>
+ <![CDATA[An archive creation utility.
+ This class provides methods that can be used
+ to create hadoop archives. For an understanding of
+ Hadoop archives, look at {@link HarFileSystem}.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.HadoopArchives -->
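+  <!-- A sketch of creating an archive programmatically via archive(...);
+       the paths and archive name are illustrative:
+         HadoopArchives har = new HadoopArchives(new Configuration());
+         har.archive(java.util.Arrays.asList(new Path("/logs/2009"),
+             new Path("/logs/2010")), "logs.har", new Path("/archives"));
+  -->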
+ <!-- start class org.apache.hadoop.tools.Logalyzer -->
+ <class name="Logalyzer" extends="java.lang.Object"
+ abstract="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <constructor name="Logalyzer"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="doArchive"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="logListURI" type="java.lang.String"/>
+ <param name="archiveDirectory" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doArchive: Workhorse function to archive log-files.
+ @param logListURI : The uri which will serve the list of log-files to archive.
+ @param archiveDirectory : The directory to store archived logfiles.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="doAnalyze"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="inputFilesDirectory" type="java.lang.String"/>
+ <param name="outputDirectory" type="java.lang.String"/>
+ <param name="grepPattern" type="java.lang.String"/>
+ <param name="sortColumns" type="java.lang.String"/>
+ <param name="columnSeparator" type="java.lang.String"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ <doc>
+ <![CDATA[doAnalyze:
+ @param inputFilesDirectory : Directory containing the files to be analyzed.
+ @param outputDirectory : Directory to store analysis (output).
+ @param grepPattern : Pattern to *grep* for.
+ @param sortColumns : Sort specification for output.
+ @param columnSeparator : Column separator.
+ @throws IOException]]>
+ </doc>
+ </method>
+ <method name="main"
+ abstract="false" native="false" synchronized="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="args" type="java.lang.String[]"/>
+ </method>
+ <doc>
+ <![CDATA[Logalyzer: A utility tool for archiving and analyzing hadoop logs.
+ <p>
+ This tool supports archiving and analyzing (sort/grep) of log-files.
+ It takes as input
+ a) an input uri which will serve uris of the logs to be archived,
+ b) an output directory (not mandatory),
+ c) a directory on dfs to archive the logs, and
+ d) the sort/grep patterns for analyzing the files and the separator for boundaries.
+ Usage:
+ Logalyzer -archive -archiveDir <directory to archive logs> -analysis <directory> -logs <log-list uri> -grep <pattern> -sort <col1, col2> -separator <separator>]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer -->
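+  <!-- A sketch of the analysis entry point matching the usage line above;
+       all argument values are illustrative:
+         Logalyzer analyzer = new Logalyzer();
+         analyzer.doAnalyze("/logs/in", "/logs/out", "ERROR", "0,1", "\t");
+  -->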
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <class name="Logalyzer.LogComparator" extends="org.apache.hadoop.io.Text.Comparator"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.conf.Configurable"/>
+ <constructor name="Logalyzer.LogComparator"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="setConf"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+ </method>
+ <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </method>
+ <method name="compare" return="int"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="b1" type="byte[]"/>
+ <param name="s1" type="int"/>
+ <param name="l1" type="int"/>
+ <param name="b2" type="byte[]"/>
+ <param name="s2" type="int"/>
+ <param name="l2" type="int"/>
+ </method>
+ <doc>
+ <![CDATA[A WritableComparator optimized for UTF8 keys of the logs.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogComparator -->
+ <!-- start class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+ <class name="Logalyzer.LogRegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+ abstract="false"
+ static="true" final="false" visibility="public"
+ deprecated="not deprecated">
+ <implements name="org.apache.hadoop.mapred.Mapper"/>
+ <constructor name="Logalyzer.LogRegexMapper"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ </constructor>
+ <method name="configure"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+ </method>
+ <method name="map"
+ abstract="false" native="false" synchronized="false"
+ static="false" final="false" visibility="public"
+ deprecated="not deprecated">
+ <param name="key" type="org.apache.hadoop.io.WritableComparable"/>
+ <param name="value" type="org.apache.hadoop.io.Text"/>
+ <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+ <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+ <exception name="IOException" type="java.io.IOException"/>
+ </method>
+ <doc>
+ <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+ </doc>
+ </class>
+ <!-- end class org.apache.hadoop.tools.Logalyzer.LogRegexMapper -->
+</package>
+
+</api>
diff --git a/lib/hadoop-0.20.0/lib/jets3t-0.6.1.jar b/lib/hadoop-0.20.0/lib/jets3t-0.6.1.jar
new file mode 100644
index 0000000000..e4048dd685
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jets3t-0.6.1.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/jetty-6.1.14.jar b/lib/hadoop-0.20.0/lib/jetty-6.1.14.jar
new file mode 100644
index 0000000000..8c503bea21
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jetty-6.1.14.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/jetty-util-6.1.14.jar b/lib/hadoop-0.20.0/lib/jetty-util-6.1.14.jar
new file mode 100644
index 0000000000..8f924bb147
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jetty-util-6.1.14.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/jsp-2.1/jsp-2.1.jar b/lib/hadoop-0.20.0/lib/jsp-2.1/jsp-2.1.jar
new file mode 100644
index 0000000000..bfdb566c13
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jsp-2.1/jsp-2.1.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/jsp-2.1/jsp-api-2.1.jar b/lib/hadoop-0.20.0/lib/jsp-2.1/jsp-api-2.1.jar
new file mode 100644
index 0000000000..ac3a7a8f7e
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/jsp-2.1/jsp-api-2.1.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/junit-3.8.1.jar b/lib/hadoop-0.20.0/lib/junit-3.8.1.jar
new file mode 100644
index 0000000000..674d71e89e
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/junit-3.8.1.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/kfs-0.2.2.jar b/lib/hadoop-0.20.0/lib/kfs-0.2.2.jar
new file mode 100644
index 0000000000..aa32e74baf
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/kfs-0.2.2.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/kfs-0.2.LICENSE.txt b/lib/hadoop-0.20.0/lib/kfs-0.2.LICENSE.txt
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/kfs-0.2.LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/lib/hadoop-0.20.0/lib/log4j-1.2.15.jar b/lib/hadoop-0.20.0/lib/log4j-1.2.15.jar
new file mode 100644
index 0000000000..c930a6ab4d
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/log4j-1.2.15.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/native/.DS_Store b/lib/hadoop-0.20.0/lib/native/.DS_Store
new file mode 100644
index 0000000000..ff86c6f7f2
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/native/.DS_Store
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.a b/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.a
new file mode 100644
index 0000000000..d8d90cf067
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.a
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.la b/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.la
new file mode 100644
index 0000000000..2e772fb4ae
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.la
@@ -0,0 +1,35 @@
+# libhadoop.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.22 (1.1220.2.365 2005/12/18 22:14:06)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='libhadoop.so.1'
+
+# Names of this library.
+library_names='libhadoop.so.1.0.0 libhadoop.so.1 libhadoop.so'
+
+# The name of the static archive.
+old_library='libhadoop.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/home/hadoopqa/tools/java/latest1.6-64/jre/lib/amd64/server -ljvm -ldl'
+
+# Version information for libhadoop.
+current=1
+age=0
+revision=0
+
+# Is this an already installed library?
+installed=yes
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=no
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir='/usr/local/lib'
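The dlname field above names the shared object that clients are meant to dlopen(3). A minimal C sketch of that lookup, assuming libhadoop.so.1 is on the dynamic loader's search path (for example via LD_LIBRARY_PATH); the symbol lookup itself is omitted since libhadoop's exported functions are not listed in this file:

    /* Probe for the library named by the dlname field above.
       Build with: cc probe_libhadoop.c -ldl */
    #include <dlfcn.h>
    #include <stdio.h>

    int main(void)
    {
        void *handle = dlopen("libhadoop.so.1", RTLD_NOW);
        if (handle == NULL) {
            fprintf(stderr, "dlopen failed: %s\n", dlerror());
            return 1;
        }
        printf("loaded libhadoop.so.1\n");
        dlclose(handle);
        return 0;
    }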
diff --git a/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so b/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so
new file mode 100644
index 0000000000..fb2cbad0b5
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so.1 b/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so.1
new file mode 100644
index 0000000000..fb2cbad0b5
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so.1
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so.1.0.0 b/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so.1.0.0
new file mode 100644
index 0000000000..fb2cbad0b5
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/native/Linux-amd64-64/libhadoop.so.1.0.0
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.a b/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.a
new file mode 100644
index 0000000000..068d2d6bf6
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.a
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.la b/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.la
new file mode 100644
index 0000000000..14941670b4
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.la
@@ -0,0 +1,35 @@
+# libhadoop.la - a libtool library file
+# Generated by ltmain.sh - GNU libtool 1.5.22 (1.1220.2.365 2005/12/18 22:14:06)
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='libhadoop.so.1'
+
+# Names of this library.
+library_names='libhadoop.so.1.0.0 libhadoop.so.1 libhadoop.so'
+
+# The name of the static archive.
+old_library='libhadoop.a'
+
+# Libraries that this one depends upon.
+dependency_libs=' -L/home/hadoopqa/tools/java/latest1.6-32/jre/lib/i386/server -ljvm -ldl'
+
+# Version information for libhadoop.
+current=1
+age=0
+revision=0
+
+# Is this an already installed library?
+installed=yes
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=no
+
+# Files to dlopen/dlpreopen
+dlopen=''
+dlpreopen=''
+
+# Directory that this library needs to be installed in:
+libdir='/usr/local/lib'
diff --git a/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so b/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so
new file mode 100644
index 0000000000..e3acc2b220
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so.1 b/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so.1
new file mode 100644
index 0000000000..e3acc2b220
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so.1
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so.1.0.0 b/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so.1.0.0
new file mode 100644
index 0000000000..e3acc2b220
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/native/Linux-i386-32/libhadoop.so.1.0.0
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/oro-2.0.8.jar b/lib/hadoop-0.20.0/lib/oro-2.0.8.jar
new file mode 100644
index 0000000000..23488d2600
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/oro-2.0.8.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/servlet-api-2.5-6.1.14.jar b/lib/hadoop-0.20.0/lib/servlet-api-2.5-6.1.14.jar
new file mode 100644
index 0000000000..6d7404fb72
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/servlet-api-2.5-6.1.14.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/lib/xmlenc-0.52.jar b/lib/hadoop-0.20.0/lib/xmlenc-0.52.jar
new file mode 100644
index 0000000000..ec568b4c9e
--- /dev/null
+++ b/lib/hadoop-0.20.0/lib/xmlenc-0.52.jar
Binary files differ
diff --git a/lib/hadoop-0.20.0/librecordio/librecordio.a b/lib/hadoop-0.20.0/librecordio/librecordio.a
new file mode 100644
index 0000000000..49f7c22d1e
--- /dev/null
+++ b/lib/hadoop-0.20.0/librecordio/librecordio.a
Binary files differ
diff --git a/lib/hadoop-0.20.0/webapps/datanode/WEB-INF/web.xml b/lib/hadoop-0.20.0/webapps/datanode/WEB-INF/web.xml
new file mode 100644
index 0000000000..c271b62815
--- /dev/null
+++ b/lib/hadoop-0.20.0/webapps/datanode/WEB-INF/web.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<web-app xmlns="http://java.sun.com/xml/ns/javaee"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
+ version="2.5">
+
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.hdfs.server.datanode.browseDirectory_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.hdfs.server.datanode.browseDirectory_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.hdfs.server.datanode.tail_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.hdfs.server.datanode.tail_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.hdfs.server.datanode.browseBlock_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.hdfs.server.datanode.browseBlock_jsp</servlet-class>
+ </servlet>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.hdfs.server.datanode.browseDirectory_jsp</servlet-name>
+ <url-pattern>/browseDirectory.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.hdfs.server.datanode.tail_jsp</servlet-name>
+ <url-pattern>/tail.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.hdfs.server.datanode.browseBlock_jsp</servlet-name>
+ <url-pattern>/browseBlock.jsp</url-pattern>
+ </servlet-mapping>
+
+</web-app>
+
diff --git a/lib/hadoop-0.20.0/webapps/hdfs/WEB-INF/web.xml b/lib/hadoop-0.20.0/webapps/hdfs/WEB-INF/web.xml
new file mode 100644
index 0000000000..40a73d5293
--- /dev/null
+++ b/lib/hadoop-0.20.0/webapps/hdfs/WEB-INF/web.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<web-app xmlns="http://java.sun.com/xml/ns/javaee"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
+ version="2.5">
+
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.hdfs.server.namenode.nn_005fbrowsedfscontent_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.hdfs.server.namenode.nn_005fbrowsedfscontent_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.hdfs.server.namenode.dfsnodelist_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.hdfs.server.namenode.dfsnodelist_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.hdfs.server.namenode.dfshealth_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.hdfs.server.namenode.dfshealth_jsp</servlet-class>
+ </servlet>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.hdfs.server.namenode.nn_005fbrowsedfscontent_jsp</servlet-name>
+ <url-pattern>/nn_browsedfscontent.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.hdfs.server.namenode.dfsnodelist_jsp</servlet-name>
+ <url-pattern>/dfsnodelist.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.hdfs.server.namenode.dfshealth_jsp</servlet-name>
+ <url-pattern>/dfshealth.jsp</url-pattern>
+ </servlet-mapping>
+
+</web-app>
+
diff --git a/lib/hadoop-0.20.0/webapps/hdfs/index.html b/lib/hadoop-0.20.0/webapps/hdfs/index.html
new file mode 100644
index 0000000000..b9ad74218a
--- /dev/null
+++ b/lib/hadoop-0.20.0/webapps/hdfs/index.html
@@ -0,0 +1,20 @@
+<meta HTTP-EQUIV="REFRESH" content="0;url=dfshealth.jsp"/>
+<html>
+
+<head>
+<title>Hadoop Administration</title>
+</head>
+
+<body>
+
+<h1>Hadoop Administration</h1>
+
+<ul>
+
+<li><a href="dfshealth.jsp">DFS Health/Status</a></li>
+
+</ul>
+
+</body>
+
+</html>
diff --git a/lib/hadoop-0.20.0/webapps/job/WEB-INF/web.xml b/lib/hadoop-0.20.0/webapps/job/WEB-INF/web.xml
new file mode 100644
index 0000000000..e124ff744b
--- /dev/null
+++ b/lib/hadoop-0.20.0/webapps/job/WEB-INF/web.xml
@@ -0,0 +1,180 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<web-app xmlns="http://java.sun.com/xml/ns/javaee"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
+ version="2.5">
+
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.jobqueue_005fdetails_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.jobqueue_005fdetails_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.jobtracker_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.jobtracker_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.machines_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.machines_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.taskdetailshistory_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.taskdetailshistory_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.jobhistory_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.jobhistory_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.jobconf_005fhistory_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.jobconf_005fhistory_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.loadhistory_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.loadhistory_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.jobdetailshistory_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.jobdetailshistory_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.jobtaskshistory_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.jobtaskshistory_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.jobfailures_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.jobfailures_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.taskdetails_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.taskdetails_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.analysejobhistory_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.analysejobhistory_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.jobblacklistedtrackers_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.jobblacklistedtrackers_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.jobdetails_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.jobdetails_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.jobtasks_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.jobtasks_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.jobconf_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.jobconf_jsp</servlet-class>
+ </servlet>
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.taskstats_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.taskstats_jsp</servlet-class>
+ </servlet>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.jobqueue_005fdetails_jsp</servlet-name>
+ <url-pattern>/jobqueue_details.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.jobtracker_jsp</servlet-name>
+ <url-pattern>/jobtracker.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.machines_jsp</servlet-name>
+ <url-pattern>/machines.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.taskdetailshistory_jsp</servlet-name>
+ <url-pattern>/taskdetailshistory.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.jobhistory_jsp</servlet-name>
+ <url-pattern>/jobhistory.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.jobconf_005fhistory_jsp</servlet-name>
+ <url-pattern>/jobconf_history.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.loadhistory_jsp</servlet-name>
+ <url-pattern>/loadhistory.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.jobdetailshistory_jsp</servlet-name>
+ <url-pattern>/jobdetailshistory.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.jobtaskshistory_jsp</servlet-name>
+ <url-pattern>/jobtaskshistory.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.jobfailures_jsp</servlet-name>
+ <url-pattern>/jobfailures.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.taskdetails_jsp</servlet-name>
+ <url-pattern>/taskdetails.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.analysejobhistory_jsp</servlet-name>
+ <url-pattern>/analysejobhistory.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.jobblacklistedtrackers_jsp</servlet-name>
+ <url-pattern>/jobblacklistedtrackers.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.jobdetails_jsp</servlet-name>
+ <url-pattern>/jobdetails.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.jobtasks_jsp</servlet-name>
+ <url-pattern>/jobtasks.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.jobconf_jsp</servlet-name>
+ <url-pattern>/jobconf.jsp</url-pattern>
+ </servlet-mapping>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.taskstats_jsp</servlet-name>
+ <url-pattern>/taskstats.jsp</url-pattern>
+ </servlet-mapping>
+
+</web-app>
+
diff --git a/lib/hadoop-0.20.0/webapps/job/index.html b/lib/hadoop-0.20.0/webapps/job/index.html
new file mode 100644
index 0000000000..0cc16dce3d
--- /dev/null
+++ b/lib/hadoop-0.20.0/webapps/job/index.html
@@ -0,0 +1,20 @@
+<meta HTTP-EQUIV="REFRESH" content="0;url=jobtracker.jsp"/>
+<html>
+
+<head>
+<title>Hadoop Administration</title>
+</head>
+
+<body>
+
+<h1>Hadoop Administration</h1>
+
+<ul>
+
+<li><a href="jobtracker.jsp">JobTracker</a></li>
+
+</ul>
+
+</body>
+
+</html>
diff --git a/lib/hadoop-0.20.0/webapps/static/hadoop-logo.jpg b/lib/hadoop-0.20.0/webapps/static/hadoop-logo.jpg
new file mode 100644
index 0000000000..809525d9f1
--- /dev/null
+++ b/lib/hadoop-0.20.0/webapps/static/hadoop-logo.jpg
Binary files differ
diff --git a/lib/hadoop-0.20.0/webapps/static/hadoop.css b/lib/hadoop-0.20.0/webapps/static/hadoop.css
new file mode 100644
index 0000000000..0560cb3075
--- /dev/null
+++ b/lib/hadoop-0.20.0/webapps/static/hadoop.css
@@ -0,0 +1,134 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements. See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+body {
+ background-color : #ffffff;
+ font-family : sans-serif;
+}
+
+.small {
+ font-size : smaller;
+}
+
+div#dfsnodetable tr#row1, div#dfstable td#col1 {
+ font-weight : bolder;
+}
+
+div#dfstable td#col1 {
+ vertical-align : top;
+}
+
+div#dfstable td#col3 {
+ text-align : right;
+}
+
+div#dfsnodetable caption {
+ text-align : left;
+}
+
+div#dfsnodetable a#title {
+ font-size : larger;
+ font-weight : bolder;
+}
+
+div#dfsnodetable td, th {
+ border-bottom-style : none;
+ padding-bottom : 4px;
+ padding-top : 4px;
+}
+
+div#dfsnodetable A:link, A:visited {
+ text-decoration : none;
+}
+
+div#dfsnodetable th.header, th.headerASC, th.headerDSC {
+ padding-bottom : 8px;
+ padding-top : 8px;
+}
+div#dfsnodetable th.header:hover, th.headerASC:hover, th.headerDSC:hover,
+ td.name:hover {
+ text-decoration : underline;
+ cursor : pointer;
+}
+
+div#dfsnodetable td.blocks, td.size, td.pcused, td.adminstate, td.lastcontact {
+ text-align : right;
+}
+
+div#dfsnodetable .rowNormal .header {
+ background-color : #ffffff;
+}
+div#dfsnodetable .rowAlt, .headerASC, .headerDSC {
+ background-color : lightyellow;
+}
+
+.warning {
+ font-weight : bolder;
+ color : red;
+}
+
+div#dfstable table {
+ white-space : pre;
+}
+
+div#dfsnodetable td, div#dfsnodetable th, div#dfstable td {
+ padding-left : 10px;
+ padding-right : 10px;
+}
+
+td.perc_filled {
+ background-color:#AAAAFF;
+}
+
+td.perc_nonfilled {
+ background-color:#FFFFFF;
+}
+
+line.taskgraphline {
+ stroke-width:1;stroke-linecap:round;
+}
+
+#quicklinks {
+ margin: 0;
+ padding: 2px 4px;
+ position: fixed;
+ top: 0;
+ right: 0;
+ text-align: right;
+ background-color: #eee;
+ font-weight: bold;
+}
+
+#quicklinks ul {
+ margin: 0;
+ padding: 0;
+ list-style-type: none;
+ font-weight: normal;
+}
+
+#quicklinks ul {
+ display: none;
+}
+
+#quicklinks a {
+ font-size: smaller;
+ text-decoration: none;
+}
+
+#quicklinks ul a {
+ text-decoration: underline;
+}
diff --git a/lib/hadoop-0.20.0/webapps/static/jobconf.xsl b/lib/hadoop-0.20.0/webapps/static/jobconf.xsl
new file mode 100644
index 0000000000..f3c2e33cef
--- /dev/null
+++ b/lib/hadoop-0.20.0/webapps/static/jobconf.xsl
@@ -0,0 +1,18 @@
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<table border="1" align="center" >
+<tr>
+ <th>name</th>
+ <th>value</th>
+</tr>
+<xsl:for-each select="property">
+<tr>
+ <td width="35%"><b><xsl:value-of select="name"/></b></td>
+ <td width="65%"><xsl:value-of select="value"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</xsl:template>
+</xsl:stylesheet>
diff --git a/lib/hadoop-0.20.0/webapps/static/jobtracker.js b/lib/hadoop-0.20.0/webapps/static/jobtracker.js
new file mode 100644
index 0000000000..7da16c1fcc
--- /dev/null
+++ b/lib/hadoop-0.20.0/webapps/static/jobtracker.js
@@ -0,0 +1,151 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements. See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
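+// Relabel the Select All / Deselect All button to match the current state of the visible job checkboxes.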
+function checkButtonVerbage()
+{
+ var inputs = document.getElementsByName("jobCheckBox");
+ var check = getCheckStatus(inputs);
+
+ setCheckButtonVerbage(! check);
+}
+
+function selectAll()
+{
+ var inputs = document.getElementsByName("jobCheckBox");
+ var check = getCheckStatus(inputs);
+
+ for (var i in inputs) {
+ if ('jobCheckBox' == inputs[i].name) {
+ if ( inputs[i].parentNode.parentNode.style.display != 'none') {
+ inputs[i].checked = ! check;
+ }
+ }
+ }
+
+ setCheckButtonVerbage(check);
+}
+
+function getCheckStatus(inputs)
+{
+ var check = true;
+
+ for (var i in inputs) {
+ if ('jobCheckBox' == inputs[i].name) {
+ if ( inputs[i].parentNode.parentNode.style.display != 'none') {
+ check = (inputs[i].checked && check);
+ }
+ }
+ }
+
+ return check;
+}
+
+
+function setCheckButtonVerbage(check)
+{
+ var op = document.getElementById("checkEm");
+ op.value = check ? "Select All" : "Deselect All";
+}
+
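+// Hide every job row that fails to match all of the space-separated terms typed into the filter box.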
+function applyfilter()
+{
+ var cols = ["job","priority","user","name"];
+ var nodes = [];
+ var filters = [];
+
+ for (var i = 0; i < cols.length; ++i) {
+ nodes[i] = document.getElementById(cols[i] + "_0" );
+ }
+
+ var filter = document.getElementById("filter");
+ filters = filter.value.split(' ');
+
+ var row = 0;
+ while ( nodes[0] != null ) {
+ //default display status
+ var display = true;
+
+ // for each filter
+ for (var filter_idx = 0; filter_idx < filters.length; ++filter_idx) {
+
+ // go check each column
+ if ((getDisplayStatus(nodes, filters[filter_idx], cols)) == 0) {
+ display = false;
+ break;
+ }
+ }
+
+ // set the display status
+ nodes[0].parentNode.style.display = display ? '' : 'none';
+
+ // next row
+ ++row;
+
+ // next set of controls
+ for (var i = 0; i < cols.length; ++i) {
+ nodes[i] = document.getElementById(cols[i] + "_" + row);
+ }
+ } // while
+}
+
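+// Report whether one filter term matches this row; a "column:term" filter restricts the search to that column.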
+function getDisplayStatus(nodes, filter, cols)
+{
+ var offset = filter.indexOf(':');
+
+ var search = offset != -1 ? filter.substring(offset + 1).toLowerCase() : filter.toLowerCase();
+
+ for (var col = 0; col < cols.length; ++col) {
+ // a column specific filter
+ if (offset != -1 ) {
+ var searchCol = filter.substring(0, offset).toLowerCase();
+
+ if (searchCol == cols[col]) {
+ // special case jobs to remove unnecessary stuff
+ return containsIgnoreCase(stripHtml(nodes[col].innerHTML), search);
+ }
+ } else if (containsIgnoreCase(stripHtml(nodes[col].innerHTML), filter)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+function stripHtml(text)
+{
+ return text.replace(/<[^>]*>/g,'').replace(/&[^;]*;/g,'');
+}
+
+function containsIgnoreCase(haystack, needle)
+{
+ return haystack.toLowerCase().indexOf(needle.toLowerCase()) != -1;
+}
+
+function confirmAction()
+{
+ return confirm("Are you sure?");
+}
+
+function toggle(id)
+{
+ if ( document.getElementById(id).style.display != 'block') {
+ document.getElementById(id).style.display = 'block';
+ }
+ else {
+ document.getElementById(id).style.display = 'none';
+ }
+}
diff --git a/lib/hadoop-0.20.0/webapps/task/WEB-INF/web.xml b/lib/hadoop-0.20.0/webapps/task/WEB-INF/web.xml
new file mode 100644
index 0000000000..44a03bed6b
--- /dev/null
+++ b/lib/hadoop-0.20.0/webapps/task/WEB-INF/web.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<web-app xmlns="http://java.sun.com/xml/ns/javaee"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
+ version="2.5">
+
+
+ <servlet>
+ <servlet-name>org.apache.hadoop.mapred.tasktracker_jsp</servlet-name>
+ <servlet-class>org.apache.hadoop.mapred.tasktracker_jsp</servlet-class>
+ </servlet>
+
+ <servlet-mapping>
+ <servlet-name>org.apache.hadoop.mapred.tasktracker_jsp</servlet-name>
+ <url-pattern>/tasktracker.jsp</url-pattern>
+ </servlet-mapping>
+
+</web-app>
+
diff --git a/lib/hadoop-0.20.0/webapps/task/index.html b/lib/hadoop-0.20.0/webapps/task/index.html
new file mode 100644
index 0000000000..ab3d56ba46
--- /dev/null
+++ b/lib/hadoop-0.20.0/webapps/task/index.html
@@ -0,0 +1 @@
+<meta HTTP-EQUIV="REFRESH" content="0;url=tasktracker.jsp"/>
diff --git a/lib/jetty-7.1.6.v20100715/jetty-server-7.1.6.v20100715.jar b/lib/jetty-7.1.6.v20100715/jetty-server-7.1.6.v20100715.jar
new file mode 100644
index 0000000000..d9ef50be6d
--- /dev/null
+++ b/lib/jetty-7.1.6.v20100715/jetty-server-7.1.6.v20100715.jar
Binary files differ
diff --git a/lib/jetty-7.1.6.v20100715/servlet-api-2.5.jar b/lib/jetty-7.1.6.v20100715/servlet-api-2.5.jar
new file mode 100644
index 0000000000..fb52493468
--- /dev/null
+++ b/lib/jetty-7.1.6.v20100715/servlet-api-2.5.jar
Binary files differ
diff --git a/lib/jline.jar b/lib/jline.jar
new file mode 100644
index 0000000000..6ed67faab6
--- /dev/null
+++ b/lib/jline.jar
Binary files differ
diff --git a/lib/liblzf-3.5/Changes b/lib/liblzf-3.5/Changes
new file mode 100644
index 0000000000..09581ff20f
--- /dev/null
+++ b/lib/liblzf-3.5/Changes
@@ -0,0 +1,125 @@
+3.5 Fri May 1 02:28:42 CEST 2009
+ - lzf_compress did sometimes write one octet past the given output
+ buffer (analyzed, with a nice testcase, by Salvatore Sanfilippo).
+
+3.4 Tue Sep 2 06:45:00 CEST 2008
+ - the fix from 3.3 introduced a compression bug, which is fixed in
+ this release (which explains the mysterious prerelease...). Thanks
+ once more to Clément Calmels.
+
+3.3 Mon Aug 25 03:17:42 CEST 2008
+ - lzf_compress could access memory after the given input buffer
+ when outputting back references. reported with a nice testcase
+ by Clément Calmels.
+
+3.2 Fri May 9 18:52:23 CEST 2008
+ - include a workaround for failing POSIX and real-world compliance
+ on 64 bit windows (microsoft claims to support POSIX, but is far
+ from it). (bug found and analysed nicely by John Lilley).
+
+3.1 Fri Nov 30 11:33:04 CET 2007
+ - IMPORTANT BUGFIX: an overly long final literal run would corrupt data
+ in the encoder (this was introduced in 3.0 only, earlier versions
+ are safe).
+
+3.0 Tue Nov 13 22:13:09 CET 2007
+ - switched to 2-clause bsd with "GPL v2 or any later version" option.
+ - speed up compression by ~10-15% in common cases
+ by some manual unrolling.
+ - import some compiler tricks from JSON::XS, for further speed-ups.
+ - tune hash functions depending on ULTRA_FAST or VERY_FAST settings.
+ - for typical binary data (e.g. /bin/bash, memory dumps,
+ canterbury corpus etc.), speed is now comparable to fastlz, but
+ with better compression ratio. with ULTRA_FAST, it's typically
+ 3-15% faster than fastlz while still maintaining a similar ratio.
+ (amd64 and core 2 duo, ymmv). thanks a lot for the competition :)
+ - undo inline assembly in compressor, it is no longer helpful.
+ - no changes to the decompressor.
+ - use a HLOG of 16 by default now (formerly 15).
+
+2.1 Fri Nov 2 13:34:42 CET 2007
+ - switched to a 2-clause bsd license with GPL exception.
+ - get rid of memcpy.
+ - tentatively use rep movsb on x86 and x86_64 (gcc only) for a
+ moderate speed improvement.
+ - applied patch by Kein-Hong Man to make lzf.c compile under
+ the crippled mingw32 environment.
+
+2.0 Fri Feb 16 23:11:18 CET 2007
+ - replaced lzf demo by industrial-strength lzf utility with behaviour
+ similar to other compression utilities. Thanks to Stefan Traby for
+ rewriting it!
+ - fix state arg prototype.
+
+1.7 Wed Sep 27 17:29:15 CEST 2006
+ - remove bogus "unlzf" patch.
+ note to self: never accept well-meant patches.
+ - make lzf more robust in presence of padding bytes or sudden eof.
+
+1.6 Fri Jul 7 17:31:26 CEST 2006
+ - the lzf example utility will now uncompress if invoked
+ as "unlzf" (patch by Scott Feeney).
+ - add CHECK_INPUT option that adds more checks for input
+ data validity.
+ - help applications that do not pass in the correct length
+ (such as php) by returning either EINVAL or E2BIG.
+ - default HLOG size is now 15 (cpu caches have increased).
+ - documentation fixes.
+
+1.51 Thu Apr 14 22:15:46 CEST 2005
+ - incorporated C♯ implementation of both the en- and decoder,
+ written by "Oren J. Maurice".
+ You can find it in the cs/ subdirectory.
+ - make FRST, NEXT, IDX overridable if lzf_c.c is directly included
+ in the code.
+
+1.5 Tue Mar 8 20:23:23 CET 2005
+ - incorporated improvements by Adam D. Moss,
+ which include a new VERY_FAST mode which is
+ a bit slower than ULTRA_FAST but much better,
+ and enabled it by default.
+
+1.401 Thu Mar 3 18:00:52 CET 2005
+ - use cstring in c++, not string.h.
+ - change of contact address.
+
+1.4 Wed Dec 15 08:08:49 CET 2004
+ - very very slight tuning of the hashing function.
+
+1.3 Thu Mar 25 15:41:17 CET 2004
+ - changed license of lzf core code to explicitly allow
+ relicensing under the GPLv2.
+ - added VPATH support as suggested by Björn Eriksson.
+
+1.2 Mon Dec 29 13:47:28 CET 2003
+ - avoid spurious memory accesses after the to-be-compressed
+ memory region. originally reported by Michal Zalewski.
+ - flip LZF_STACK_ARG meaning (to be correct).
+
+1.1 Tue Dec 23 05:48:32 CET 2003
+ - removed #warn directive, it's not worth the hassle.
+ - add LZF_STACK_ARG and AVOID_ERRNO configurations
+ for embedded systems.
+ - make it compile cleanly as c++.
+ - some small documentation and code fixes.
+
+1.0 Sun Nov 17 12:37:37 CET 2002
+ - slightly better compression ratio, almost unmeasurably
+ slower.
+ - some documentation fixes.
+
+0.4 Thu Jun 13 14:11:10 CEST 2002
+ - typo fix.
+ - lzf demo program now properly decompresses small files.
+ - fix another 64 bit issue, found by Laurent Deniel.
+
+0.3 Tue Jan 16 13:21:14 CET 2001
+ - fix silly beginner's 32/64 bit mistake.
+
+0.2 Thu Jan 4 05:56:42 CET 2001
+ - now totally independent of autoconfig, for
+ easy inclusion into other programs.
+ - much better fine-tuning, faster and better than 0.1.
+
+0.1 2000
+ - initial release.
diff --git a/lib/liblzf-3.5/LICENSE b/lib/liblzf-3.5/LICENSE
new file mode 100644
index 0000000000..9d8e6ec9c8
--- /dev/null
+++ b/lib/liblzf-3.5/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2000-2007 Marc Alexander Lehmann <schmorp@schmorp.de>
+
+Redistribution and use in source and binary forms, with or without modifica-
+tion, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
+CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
+CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
+ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Alternatively, the following files carry an additional notice that
+explicitly allows relicensing under the GPLv2: lzf.c lzf.h lzfP.h lzf_c.c
+lzf_d.c
+
diff --git a/lib/liblzf-3.5/Makefile b/lib/liblzf-3.5/Makefile
new file mode 100644
index 0000000000..2af118f9d3
--- /dev/null
+++ b/lib/liblzf-3.5/Makefile
@@ -0,0 +1,66 @@
+VERSION = 3.5
+
+prefix = /usr/local
+exec_prefix = ${prefix}
+libdir = ${exec_prefix}/lib
+bindir = ${exec_prefix}/bin
+includedir = ${prefix}/include
+
+
+
+CC = gcc
+CPPFLAGS = -I.
+CFLAGS = -fPIC -O3 -funroll-all-loops
+LDFLAGS =
+RANLIB = ranlib
+INSTALL = /usr/bin/install -c
+INSTALL_DATA = ${INSTALL} -m 644
+
+all: Makefile lzf
+
+clean:
+ -rm -f *.o *.a lzf bench
+
+lzf_c.o: lzf_c.c lzfP.h
+
+lzf_d.o: lzf_d.c lzfP.h
+
+lzf.o: lzf.c
+
+lzf: lzf.o liblzf.a
+
+lzfP.h: lzf.h config.h
+
+liblzf.a: lzf_c.o lzf_d.o
+ rm -f $@
+ $(AR) rc $@ $^
+ $(RANLIB) $@
+
+install: all
+ $(INSTALL) -d $(bindir)
+ $(INSTALL) -m 755 lzf $(bindir)
+ $(INSTALL) -d $(includedir)
+ $(INSTALL_DATA) lzf.h $(includedir)
+ $(INSTALL) -d $(libdir)
+ $(INSTALL_DATA) liblzf.a $(libdir)
+
+dist:
+ mkdir liblzf-$(VERSION)
+ tar c LICENSE README Makefile.in config.h.in \
+ configure configure.ac install-sh \
+ cs/README cs/CLZF.cs \
+ lzf.h lzfP.h lzf_c.c lzf_d.c \
+ crc32.h lzf.c Changes \
+ | tar xpC liblzf-$(VERSION)
+ -chown -R root.root liblzf-$(VERSION)
+ chmod -R u=rwX,go=rX liblzf-$(VERSION)
+ tar cvf - liblzf-$(VERSION) | gzip -9 >liblzf-$(VERSION).tar.gz
+ rm -rf liblzf-$(VERSION)
+ ls -l liblzf-$(VERSION).tar.gz
+
+Makefile: Makefile.in
+ ./config.status
+
+bench: Makefile liblzf.a bench.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) -g -o bench bench.c -L. -llzf
+
diff --git a/lib/liblzf-3.5/Makefile.in b/lib/liblzf-3.5/Makefile.in
new file mode 100644
index 0000000000..bbd53c0370
--- /dev/null
+++ b/lib/liblzf-3.5/Makefile.in
@@ -0,0 +1,66 @@
+VERSION = 3.5
+
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+libdir = @libdir@
+bindir = @bindir@
+includedir = @includedir@
+
+VPATH = @srcdir@
+
+CC = @CC@
+CPPFLAGS = -I. @CPPFLAGS@
+CFLAGS = @CFLAGS@
+LDFLAGS = @LDFLAGS@
+RANLIB = @RANLIB@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+
+all: Makefile lzf
+
+clean:
+ -rm -f *.o *.a lzf bench
+
+lzf_c.o: lzf_c.c lzfP.h
+
+lzf_d.o: lzf_d.c lzfP.h
+
+lzf.o: lzf.c
+
+lzf: lzf.o liblzf.a
+
+lzfP.h: lzf.h config.h
+
+liblzf.a: lzf_c.o lzf_d.o
+ rm -f $@
+ $(AR) rc $@ $^
+ $(RANLIB) $@
+
+install: all
+ $(INSTALL) -d $(bindir)
+ $(INSTALL) -m 755 lzf $(bindir)
+ $(INSTALL) -d $(includedir)
+ $(INSTALL_DATA) lzf.h $(includedir)
+ $(INSTALL) -d $(libdir)
+ $(INSTALL_DATA) liblzf.a $(libdir)
+
+dist:
+ mkdir liblzf-$(VERSION)
+ tar c LICENSE README Makefile.in config.h.in \
+ configure configure.ac install-sh \
+ cs/README cs/CLZF.cs \
+ lzf.h lzfP.h lzf_c.c lzf_d.c \
+ crc32.h lzf.c Changes \
+ | tar xpC liblzf-$(VERSION)
+ -chown -R root.root liblzf-$(VERSION)
+ chmod -R u=rwX,go=rX liblzf-$(VERSION)
+ tar cvf - liblzf-$(VERSION) | gzip -9 >liblzf-$(VERSION).tar.gz
+ rm -rf liblzf-$(VERSION)
+ ls -l liblzf-$(VERSION).tar.gz
+
+Makefile: Makefile.in
+ ./config.status
+
+bench: Makefile liblzf.a bench.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) -g -o bench bench.c -L. -llzf
+
diff --git a/lib/liblzf-3.5/README b/lib/liblzf-3.5/README
new file mode 100644
index 0000000000..b5379a3892
--- /dev/null
+++ b/lib/liblzf-3.5/README
@@ -0,0 +1,29 @@
+DESCRIPTION
+ LZF is an extremely fast (not that much slower than a pure memcpy)
+ compression algorithm. It is ideal for applications where you want to
+ save *some* space but not at the cost of speed. It is ideal for
+ repetitive data as well. The module is self-contained and very small.
+
+ It's written in ISO-C with no external dependencies other than what
+ C provides and can easily be #include'd into your code, no makefile
+ changes or library builds required.
+
+ A C♯ implementation without external dependencies is available, too.
+
+ I do not know for certain whether any patents in any countries apply
+ to this algorithm, but at the moment it is believed that it is free
+ from any patents. More importantly, it is also free to use in every
+ software package (see LICENSE).
+
+ See the lzf.h file for details on how the functions in this
+ mini-library are to be used.
+
+ NOTE: This package contains a very bare-bones command-line utility
+ which is neither optimized for speed nor for compression. This library
+ is really intended to be used inside larger programs.
+
+AUTHOR
+ This library was written by Marc Lehmann <schmorp@schmorp.de> (See also
+ http://software.schmorp.de/pkg/liblzf).
+
+
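The two entry points this README refers to are declared in lzf.h as lzf_compress() and lzf_decompress(). A round-trip sketch under two assumptions: the compression output buffer is sized one byte below the input, so a return of 0 simply means the data did not shrink, and a failed decompression reports E2BIG or EINVAL through errno, as described in the Changes entry for release 1.6:

    /* Compress and decompress a repetitive buffer with liblzf.
       Build with: cc roundtrip.c lzf_c.c lzf_d.c -I. */
    #include <stdio.h>
    #include <string.h>
    #include "lzf.h"

    int main(void)
    {
        char in[256], comp[255], out[256];
        unsigned int clen, dlen;

        memset(in, 'a', sizeof (in));  /* repetitive, so it compresses well */

        clen = lzf_compress(in, sizeof (in), comp, sizeof (comp));
        if (clen == 0) {
            fputs("did not shrink; store the input verbatim\n", stderr);
            return 0;
        }

        dlen = lzf_decompress(comp, clen, out, sizeof (out));
        if (dlen == 0) {               /* errno is E2BIG or EINVAL */
            perror("lzf_decompress");
            return 1;
        }
        printf("%u -> %u -> %u bytes\n", (unsigned) sizeof (in), clen, dlen);
        return dlen == sizeof (in) && memcmp(in, out, dlen) == 0 ? 0 : 1;
    }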
diff --git a/lib/liblzf-3.5/config.h b/lib/liblzf-3.5/config.h
new file mode 100644
index 0000000000..966d01ff7d
--- /dev/null
+++ b/lib/liblzf-3.5/config.h
@@ -0,0 +1,17 @@
+/* config.h. Generated from config.h.in by configure. */
+/* config.h.in. Generated automatically from configure.in by autoheader 2.13. */
+
+/* Define to empty if the keyword does not work. */
+/* #undef const */
+
+/* Define if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* The number of bytes in an int. */
+#define SIZEOF_INT 4
+
+/* The number of bytes in a long. */
+#define SIZEOF_LONG 8
+
+/* The number of bytes in a short. */
+#define SIZEOF_SHORT 2
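These sizes were measured on the build host by configure. A hedged illustration, not part of liblzf itself, of the usual trick for turning a stale config.h into a compile-time error rather than a silent miscompilation:

    /* Each typedef gets a negative array size, and so fails to compile,
       whenever the configured size disagrees with the current compiler. */
    #include "config.h"

    typedef char sizeof_short_matches[SIZEOF_SHORT == sizeof (short) ? 1 : -1];
    typedef char sizeof_int_matches  [SIZEOF_INT   == sizeof (int)   ? 1 : -1];
    typedef char sizeof_long_matches [SIZEOF_LONG  == sizeof (long)  ? 1 : -1];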
diff --git a/lib/liblzf-3.5/config.h.in b/lib/liblzf-3.5/config.h.in
new file mode 100644
index 0000000000..5fd69c6bd5
--- /dev/null
+++ b/lib/liblzf-3.5/config.h.in
@@ -0,0 +1,16 @@
+/* config.h.in. Generated automatically from configure.in by autoheader 2.13. */
+
+/* Define to empty if the keyword does not work. */
+#undef const
+
+/* Define if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* The number of bytes in an int. */
+#undef SIZEOF_INT
+
+/* The number of bytes in a long. */
+#undef SIZEOF_LONG
+
+/* The number of bytes in a short. */
+#undef SIZEOF_SHORT
diff --git a/lib/liblzf-3.5/config.log b/lib/liblzf-3.5/config.log
new file mode 100644
index 0000000000..dfa0a83845
--- /dev/null
+++ b/lib/liblzf-3.5/config.log
@@ -0,0 +1,515 @@
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by configure, which was
+generated by GNU Autoconf 2.60. Invocation command line was
+
+ $ ./configure
+
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = ubuntu
+uname -m = x86_64
+uname -r = 2.6.31-14-server
+uname -s = Linux
+uname -v = #48-Ubuntu SMP Fri Oct 16 15:07:34 UTC 2009
+
+/usr/bin/uname -p = unknown
+/bin/uname -X = unknown
+
+/bin/arch = unknown
+/usr/bin/arch -k = unknown
+/usr/convex/getsysinfo = unknown
+/usr/bin/hostinfo = unknown
+/bin/machine = unknown
+/usr/bin/oslevel = unknown
+/bin/universe = unknown
+
+PATH: /usr/local/sbin
+PATH: /usr/local/bin
+PATH: /usr/sbin
+PATH: /usr/bin
+PATH: /sbin
+PATH: /bin
+PATH: /usr/games
+PATH: /home/matei/scala-2.7.7.final/bin
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+configure:1723: checking for gcc
+configure:1739: found /usr/bin/gcc
+configure:1750: result: gcc
+configure:1988: checking for C compiler version
+configure:1995: gcc --version >&5
+gcc (Ubuntu 4.4.1-4ubuntu8) 4.4.1
+Copyright (C) 2009 Free Software Foundation, Inc.
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+configure:1998: $? = 0
+configure:2005: gcc -v >&5
+Using built-in specs.
+Target: x86_64-linux-gnu
+Configured with: ../src/configure -v --with-pkgversion='Ubuntu 4.4.1-4ubuntu8' --with-bugurl=file:///usr/share/doc/gcc-4.4/README.Bugs --enable-languages=c,c++,fortran,objc,obj-c++ --prefix=/usr --enable-shared --enable-multiarch --enable-linker-build-id --with-system-zlib --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --with-gxx-include-dir=/usr/include/c++/4.4 --program-suffix=-4.4 --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-objc-gc --disable-werror --with-arch-32=i486 --with-tune=generic --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu
+Thread model: posix
+gcc version 4.4.1 (Ubuntu 4.4.1-4ubuntu8)
+configure:2008: $? = 0
+configure:2015: gcc -V >&5
+gcc: '-V' option must have argument
+configure:2018: $? = 1
+configure:2041: checking for C compiler default output file name
+configure:2068: gcc -fPIC conftest.c >&5
+configure:2071: $? = 0
+configure:2117: result: a.out
+configure:2122: checking whether the C compiler works
+configure:2132: ./a.out
+configure:2135: $? = 0
+configure:2152: result: yes
+configure:2159: checking whether we are cross compiling
+configure:2161: result: no
+configure:2164: checking for suffix of executables
+configure:2171: gcc -o conftest -fPIC conftest.c >&5
+configure:2174: $? = 0
+configure:2198: result:
+configure:2204: checking for suffix of object files
+configure:2230: gcc -c -fPIC conftest.c >&5
+configure:2233: $? = 0
+configure:2256: result: o
+configure:2260: checking whether we are using the GNU C compiler
+configure:2289: gcc -c -fPIC conftest.c >&5
+configure:2295: $? = 0
+configure:2302: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:2305: $? = 0
+configure:2312: test -s conftest.o
+configure:2315: $? = 0
+configure:2329: result: yes
+configure:2334: checking whether gcc accepts -g
+configure:2364: gcc -c -g conftest.c >&5
+configure:2370: $? = 0
+configure:2377: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:2380: $? = 0
+configure:2387: test -s conftest.o
+configure:2390: $? = 0
+configure:2520: result: yes
+configure:2537: checking for gcc option to accept ISO C89
+configure:2611: gcc -c -fPIC conftest.c >&5
+configure:2617: $? = 0
+configure:2624: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:2627: $? = 0
+configure:2634: test -s conftest.o
+configure:2637: $? = 0
+configure:2657: result: none needed
+configure:2683: checking for special C compiler options needed for large files
+configure:2810: result: no
+configure:2816: checking for _FILE_OFFSET_BITS value needed for large files
+configure:2852: gcc -c -fPIC conftest.c >&5
+configure:2858: $? = 0
+configure:2865: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:2868: $? = 0
+configure:2875: test -s conftest.o
+configure:2878: $? = 0
+configure:2959: result: no
+configure:2969: checking for _LARGE_FILES value needed for large files
+configure:3005: gcc -c -fPIC conftest.c >&5
+configure:3011: $? = 0
+configure:3018: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:3021: $? = 0
+configure:3028: test -s conftest.o
+configure:3031: $? = 0
+configure:3112: result: no
+configure:3172: checking for gcc
+configure:3199: result: gcc
+configure:3437: checking for C compiler version
+configure:3444: gcc --version >&5
+gcc (Ubuntu 4.4.1-4ubuntu8) 4.4.1
+Copyright (C) 2009 Free Software Foundation, Inc.
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+configure:3447: $? = 0
+configure:3454: gcc -v >&5
+Using built-in specs.
+Target: x86_64-linux-gnu
+Configured with: ../src/configure -v --with-pkgversion='Ubuntu 4.4.1-4ubuntu8' --with-bugurl=file:///usr/share/doc/gcc-4.4/README.Bugs --enable-languages=c,c++,fortran,objc,obj-c++ --prefix=/usr --enable-shared --enable-multiarch --enable-linker-build-id --with-system-zlib --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --with-gxx-include-dir=/usr/include/c++/4.4 --program-suffix=-4.4 --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-objc-gc --disable-werror --with-arch-32=i486 --with-tune=generic --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu
+Thread model: posix
+gcc version 4.4.1 (Ubuntu 4.4.1-4ubuntu8)
+configure:3457: $? = 0
+configure:3464: gcc -V >&5
+gcc: '-V' option must have argument
+configure:3467: $? = 1
+configure:3470: checking whether we are using the GNU C compiler
+configure:3539: result: yes
+configure:3544: checking whether gcc accepts -g
+configure:3730: result: yes
+configure:3747: checking for gcc option to accept ISO C89
+configure:3867: result: none needed
+configure:3928: checking for ranlib
+configure:3944: found /usr/bin/ranlib
+configure:3955: result: ranlib
+configure:4025: checking for a BSD-compatible install
+configure:4081: result: /usr/bin/install -c
+configure:4097: checking how to run the C preprocessor
+configure:4137: gcc -E conftest.c
+configure:4143: $? = 0
+configure:4181: gcc -E conftest.c
+conftest.c:9:28: error: ac_nonexistent.h: No such file or directory
+configure:4187: $? = 1
+configure: failed program was:
+| /* confdefs.h. */
+| #define PACKAGE_NAME ""
+| #define PACKAGE_TARNAME ""
+| #define PACKAGE_VERSION ""
+| #define PACKAGE_STRING ""
+| #define PACKAGE_BUGREPORT ""
+| #define _GNU_SOURCE 1
+| /* end confdefs.h. */
+| #include <ac_nonexistent.h>
+configure:4227: result: gcc -E
+configure:4256: gcc -E conftest.c
+configure:4262: $? = 0
+configure:4300: gcc -E conftest.c
+conftest.c:9:28: error: ac_nonexistent.h: No such file or directory
+configure:4306: $? = 1
+configure: failed program was:
+| /* confdefs.h. */
+| #define PACKAGE_NAME ""
+| #define PACKAGE_TARNAME ""
+| #define PACKAGE_VERSION ""
+| #define PACKAGE_STRING ""
+| #define PACKAGE_BUGREPORT ""
+| #define _GNU_SOURCE 1
+| /* end confdefs.h. */
+| #include <ac_nonexistent.h>
+configure:4351: checking for grep that handles long lines and -e
+configure:4425: result: /bin/grep
+configure:4430: checking for egrep
+configure:4508: result: /bin/grep -E
+configure:4513: checking for ANSI C header files
+configure:4543: gcc -c -fPIC conftest.c >&5
+configure:4549: $? = 0
+configure:4556: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:4559: $? = 0
+configure:4566: test -s conftest.o
+configure:4569: $? = 0
+configure:4665: gcc -o conftest -fPIC conftest.c >&5
+configure:4668: $? = 0
+configure:4674: ./conftest
+configure:4677: $? = 0
+configure:4694: result: yes
+configure:4719: checking for sys/types.h
+configure:4740: gcc -c -fPIC conftest.c >&5
+configure:4746: $? = 0
+configure:4753: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:4756: $? = 0
+configure:4763: test -s conftest.o
+configure:4766: $? = 0
+configure:4779: result: yes
+configure:4719: checking for sys/stat.h
+configure:4740: gcc -c -fPIC conftest.c >&5
+configure:4746: $? = 0
+configure:4753: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:4756: $? = 0
+configure:4763: test -s conftest.o
+configure:4766: $? = 0
+configure:4779: result: yes
+configure:4719: checking for stdlib.h
+configure:4740: gcc -c -fPIC conftest.c >&5
+configure:4746: $? = 0
+configure:4753: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:4756: $? = 0
+configure:4763: test -s conftest.o
+configure:4766: $? = 0
+configure:4779: result: yes
+configure:4719: checking for string.h
+configure:4740: gcc -c -fPIC conftest.c >&5
+configure:4746: $? = 0
+configure:4753: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:4756: $? = 0
+configure:4763: test -s conftest.o
+configure:4766: $? = 0
+configure:4779: result: yes
+configure:4719: checking for memory.h
+configure:4740: gcc -c -fPIC conftest.c >&5
+configure:4746: $? = 0
+configure:4753: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:4756: $? = 0
+configure:4763: test -s conftest.o
+configure:4766: $? = 0
+configure:4779: result: yes
+configure:4719: checking for strings.h
+configure:4740: gcc -c -fPIC conftest.c >&5
+configure:4746: $? = 0
+configure:4753: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:4756: $? = 0
+configure:4763: test -s conftest.o
+configure:4766: $? = 0
+configure:4779: result: yes
+configure:4719: checking for inttypes.h
+configure:4740: gcc -c -fPIC conftest.c >&5
+configure:4746: $? = 0
+configure:4753: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:4756: $? = 0
+configure:4763: test -s conftest.o
+configure:4766: $? = 0
+configure:4779: result: yes
+configure:4719: checking for stdint.h
+configure:4740: gcc -c -fPIC conftest.c >&5
+configure:4746: $? = 0
+configure:4753: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:4756: $? = 0
+configure:4763: test -s conftest.o
+configure:4766: $? = 0
+configure:4779: result: yes
+configure:4719: checking for unistd.h
+configure:4740: gcc -c -fPIC conftest.c >&5
+configure:4746: $? = 0
+configure:4753: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:4756: $? = 0
+configure:4763: test -s conftest.o
+configure:4766: $? = 0
+configure:4779: result: yes
+configure:4791: checking for short
+configure:4821: gcc -c -fPIC conftest.c >&5
+configure:4827: $? = 0
+configure:4834: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:4837: $? = 0
+configure:4844: test -s conftest.o
+configure:4847: $? = 0
+configure:4859: result: yes
+configure:4862: checking size of short
+configure:5250: gcc -o conftest -fPIC conftest.c >&5
+configure:5253: $? = 0
+configure:5259: ./conftest
+configure:5262: $? = 0
+configure:5284: result: 2
+configure:5291: checking for int
+configure:5321: gcc -c -fPIC conftest.c >&5
+configure:5327: $? = 0
+configure:5334: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:5337: $? = 0
+configure:5344: test -s conftest.o
+configure:5347: $? = 0
+configure:5359: result: yes
+configure:5362: checking size of int
+configure:5750: gcc -o conftest -fPIC conftest.c >&5
+configure:5753: $? = 0
+configure:5759: ./conftest
+configure:5762: $? = 0
+configure:5784: result: 4
+configure:5791: checking for long
+configure:5821: gcc -c -fPIC conftest.c >&5
+configure:5827: $? = 0
+configure:5834: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:5837: $? = 0
+configure:5844: test -s conftest.o
+configure:5847: $? = 0
+configure:5859: result: yes
+configure:5862: checking size of long
+configure:6250: gcc -o conftest -fPIC conftest.c >&5
+configure:6253: $? = 0
+configure:6259: ./conftest
+configure:6262: $? = 0
+configure:6284: result: 8
+configure:6292: checking for an ANSI C-conforming const
+configure:6367: gcc -c -fPIC conftest.c >&5
+configure:6373: $? = 0
+configure:6380: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:6383: $? = 0
+configure:6390: test -s conftest.o
+configure:6393: $? = 0
+configure:6405: result: yes
+configure:6415: checking for inline
+configure:6441: gcc -c -fPIC conftest.c >&5
+configure:6447: $? = 0
+configure:6454: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:6457: $? = 0
+configure:6464: test -s conftest.o
+configure:6467: $? = 0
+configure:6482: result: inline
+configure:6516: checking getopt.h usability
+configure:6533: gcc -c -fPIC conftest.c >&5
+configure:6539: $? = 0
+configure:6546: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:6549: $? = 0
+configure:6556: test -s conftest.o
+configure:6559: $? = 0
+configure:6570: result: yes
+configure:6574: checking getopt.h presence
+configure:6589: gcc -E conftest.c
+configure:6595: $? = 0
+configure:6616: result: yes
+configure:6644: checking for getopt.h
+configure:6652: result: yes
+configure:6669: checking for getopt_long
+configure:6725: gcc -o conftest -fPIC conftest.c >&5
+configure:6731: $? = 0
+configure:6738: test -z "$ac_c_werror_flag" || test ! -s conftest.err
+configure:6741: $? = 0
+configure:6748: test -s conftest
+configure:6751: $? = 0
+configure:6765: result: yes
+configure:6885: creating ./config.status
+
+## ---------------------- ##
+## Running config.status. ##
+## ---------------------- ##
+
+This file was extended by config.status, which was
+generated by GNU Autoconf 2.60. Invocation command line was
+
+  CONFIG_FILES    =
+  CONFIG_HEADERS  =
+  CONFIG_LINKS    =
+  CONFIG_COMMANDS =
+  $ ./config.status
+
+on ubuntu
+
+config.status:583: creating Makefile
+config.status:583: creating config.h
+config.status:807: config.h is unchanged
+
+## ---------------- ##
+## Cache variables. ##
+## ---------------- ##
+
+ac_cv_c_compiler_gnu=yes
+ac_cv_c_const=yes
+ac_cv_c_inline=inline
+ac_cv_env_CC_set=
+ac_cv_env_CC_value=
+ac_cv_env_CFLAGS_set=set
+ac_cv_env_CFLAGS_value=-fPIC
+ac_cv_env_CPPFLAGS_set=
+ac_cv_env_CPPFLAGS_value=
+ac_cv_env_CPP_set=
+ac_cv_env_CPP_value=
+ac_cv_env_LDFLAGS_set=
+ac_cv_env_LDFLAGS_value=
+ac_cv_env_build_alias_set=
+ac_cv_env_build_alias_value=
+ac_cv_env_host_alias_set=
+ac_cv_env_host_alias_value=
+ac_cv_env_target_alias_set=
+ac_cv_env_target_alias_value=
+ac_cv_func_getopt_long=yes
+ac_cv_header_getopt_h=yes
+ac_cv_header_inttypes_h=yes
+ac_cv_header_memory_h=yes
+ac_cv_header_stdc=yes
+ac_cv_header_stdint_h=yes
+ac_cv_header_stdlib_h=yes
+ac_cv_header_string_h=yes
+ac_cv_header_strings_h=yes
+ac_cv_header_sys_stat_h=yes
+ac_cv_header_sys_types_h=yes
+ac_cv_header_unistd_h=yes
+ac_cv_objext=o
+ac_cv_path_EGREP='/bin/grep -E'
+ac_cv_path_GREP=/bin/grep
+ac_cv_path_install='/usr/bin/install -c'
+ac_cv_prog_CPP='gcc -E'
+ac_cv_prog_ac_ct_CC=gcc
+ac_cv_prog_ac_ct_RANLIB=ranlib
+ac_cv_prog_cc_c89=
+ac_cv_prog_cc_g=yes
+ac_cv_sizeof_int=4
+ac_cv_sizeof_long=8
+ac_cv_sizeof_short=2
+ac_cv_sys_file_offset_bits=no
+ac_cv_sys_large_files=no
+ac_cv_sys_largefile_CC=no
+ac_cv_type_int=yes
+ac_cv_type_long=yes
+ac_cv_type_short=yes
+
+## ----------------- ##
+## Output variables. ##
+## ----------------- ##
+
+CC='gcc'
+CFLAGS='-fPIC -O3 -funroll-all-loops'
+CPP='gcc -E'
+CPPFLAGS=''
+DEFS='-DHAVE_CONFIG_H'
+ECHO_C=''
+ECHO_N='-n'
+ECHO_T=''
+EGREP='/bin/grep -E'
+EXEEXT=''
+GREP='/bin/grep'
+INSTALL_DATA='${INSTALL} -m 644'
+INSTALL_PROGRAM='${INSTALL}'
+INSTALL_SCRIPT='${INSTALL}'
+LDFLAGS=''
+LIBOBJS=''
+LIBS=''
+LTLIBOBJS=''
+OBJEXT='o'
+PACKAGE_BUGREPORT=''
+PACKAGE_NAME=''
+PACKAGE_STRING=''
+PACKAGE_TARNAME=''
+PACKAGE_VERSION=''
+PATH_SEPARATOR=':'
+RANLIB='ranlib'
+SHELL='/bin/bash'
+ac_ct_CC='gcc'
+bindir='${exec_prefix}/bin'
+build_alias=''
+datadir='${datarootdir}'
+datarootdir='${prefix}/share'
+docdir='${datarootdir}/doc/${PACKAGE}'
+dvidir='${docdir}'
+exec_prefix='${prefix}'
+host_alias=''
+htmldir='${docdir}'
+includedir='${prefix}/include'
+infodir='${datarootdir}/info'
+libdir='${exec_prefix}/lib'
+libexecdir='${exec_prefix}/libexec'
+localedir='${datarootdir}/locale'
+localstatedir='${prefix}/var'
+mandir='${datarootdir}/man'
+oldincludedir='/usr/include'
+pdfdir='${docdir}'
+prefix='/usr/local'
+program_transform_name='s,x,x,'
+psdir='${docdir}'
+sbindir='${exec_prefix}/sbin'
+sharedstatedir='${prefix}/com'
+sysconfdir='${prefix}/etc'
+target_alias=''
+
+## ----------- ##
+## confdefs.h. ##
+## ----------- ##
+
+#define PACKAGE_NAME ""
+#define PACKAGE_TARNAME ""
+#define PACKAGE_VERSION ""
+#define PACKAGE_STRING ""
+#define PACKAGE_BUGREPORT ""
+#define _GNU_SOURCE 1
+#define STDC_HEADERS 1
+#define HAVE_SYS_TYPES_H 1
+#define HAVE_SYS_STAT_H 1
+#define HAVE_STDLIB_H 1
+#define HAVE_STRING_H 1
+#define HAVE_MEMORY_H 1
+#define HAVE_STRINGS_H 1
+#define HAVE_INTTYPES_H 1
+#define HAVE_STDINT_H 1
+#define HAVE_UNISTD_H 1
+#define SIZEOF_SHORT 2
+#define SIZEOF_INT 4
+#define SIZEOF_LONG 8
+#define HAVE_GETOPT_H 1
+#define HAVE_GETOPT_LONG 1
+
+configure: exit 0
diff --git a/lib/liblzf-3.5/config.status b/lib/liblzf-3.5/config.status
new file mode 100755
index 0000000000..2498ff6649
--- /dev/null
+++ b/lib/liblzf-3.5/config.status
@@ -0,0 +1,826 @@
+#! /bin/bash
+# Generated by configure.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+SHELL=${CONFIG_SHELL-/bin/bash}
+## --------------------- ##
+## M4sh Initialization.  ##
+## --------------------- ##
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+
+# PATH needs CR
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+as_nl='
+'
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ { (exit 1); exit 1; }
+fi
+
+# Work around bugs in pre-3.0 UWIN ksh.
+for as_var in ENV MAIL MAILPATH
+do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# CDPATH.
+$as_unset CDPATH
+
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line after each line using $LINENO; the second 'sed'
+ # does the real work. The second script uses 'N' to pair each
+ # line-number line with the line containing $LINENO, and appends
+ # trailing '-' during substitution so that $LINENO is not a special
+ # case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # scripts with optimization help from Paolo Bonzini. Blame Lee
+ # E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+ # Don't try to exec as it changes $[0], causing all sorts of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in
+-n*)
+ case `echo 'x\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ *) ECHO_C='\c';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir
+fi
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+# Find out whether ``test -x'' works. Don't use a zero-byte file, as
+# systems may use methods other than mode bits to determine executability.
+cat >conf$$.file <<_ASEOF
+#! /bin/sh
+exit 0
+_ASEOF
+chmod +x conf$$.file
+if test -x conf$$.file >/dev/null 2>&1; then
+ as_executable_p="test -x"
+else
+ as_executable_p=:
+fi
+rm -f conf$$.file
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+
+# Save the log message, to keep $[0] and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by $as_me, which was
+generated by GNU Autoconf 2.60. Invocation command line was
+
+  CONFIG_FILES    = $CONFIG_FILES
+  CONFIG_HEADERS  = $CONFIG_HEADERS
+  CONFIG_LINKS    = $CONFIG_LINKS
+  CONFIG_COMMANDS = $CONFIG_COMMANDS
+  $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+# Files that config.status was made for.
+config_files=" Makefile"
+config_headers=" config.h"
+
+ac_cs_usage="\
+\`$as_me' instantiates files from templates according to the
+current configuration.
+
+Usage: $0 [OPTIONS] [FILE]...
+
+  -h, --help       print this help, then exit
+  -V, --version    print version number, then exit
+  -q, --quiet      do not print progress messages
+  -d, --debug      don't remove temporary files
+      --recheck    update $as_me by reconfiguring in the same conditions
+  --file=FILE[:TEMPLATE]
+                   instantiate the configuration file FILE
+  --header=FILE[:TEMPLATE]
+                   instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Report bugs to <bug-autoconf@gnu.org>."
+
+ac_cs_version="\
+config.status
+configured by ./configure, generated by GNU Autoconf 2.60,
+ with options \"'CFLAGS=-fPIC'\"
+
+Copyright (C) 2006 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='/home/matei/workspace/jlzf/third_party/liblzf-3.5'
+srcdir='.'
+INSTALL='/usr/bin/install -c'
+# If no files are specified by the user, then we need to provide a default
+# value. But we need to know if files were specified by the user.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ echo "$ac_cs_version"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ CONFIG_FILES="$CONFIG_FILES $ac_optarg"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg"
+ ac_need_defaults=false;;
+ --he | --h)
+ # Conflict between --help and --header
+ { echo "$as_me: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; };;
+ --help | --hel | -h )
+ echo "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) { echo "$as_me: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; } ;;
+
+ *) ac_config_targets="$ac_config_targets $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+if $ac_cs_recheck; then
+ echo "running CONFIG_SHELL=/bin/bash /bin/bash ./configure " 'CFLAGS=-fPIC' $ac_configure_extra_args " --no-create --no-recursion" >&6
+ CONFIG_SHELL=/bin/bash
+ export CONFIG_SHELL
+ exec /bin/bash "./configure" 'CFLAGS=-fPIC' $ac_configure_extra_args --no-create --no-recursion
+fi
+
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ echo "$ac_log"
+} >&5
+
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
+ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+
+ *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
+echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp=
+ trap 'exit_status=$?
+ { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
+' 0
+ trap '{ (exit 1); exit 1; }' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -n "$tmp" && test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} ||
+{
+ echo "$me: cannot create a temporary directory in ." >&2
+ { (exit 1); exit 1; }
+}
+
+#
+# Set up the sed scripts for CONFIG_FILES section.
+#
+
+# No need to generate the scripts if there are no CONFIG_FILES.
+# This happens for instance when ./config.status config.h
+if test -n "$CONFIG_FILES"; then
+
+cat >"$tmp/subs-1.sed" <<\CEOF
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b end
+s,@SHELL@,|#_!!_#|/bin/bash,g
+s,@PATH_SEPARATOR@,|#_!!_#|:,g
+s,@PACKAGE_NAME@,|#_!!_#|,g
+s,@PACKAGE_TARNAME@,|#_!!_#|,g
+s,@PACKAGE_VERSION@,|#_!!_#|,g
+s,@PACKAGE_STRING@,|#_!!_#|,g
+s,@PACKAGE_BUGREPORT@,|#_!!_#|,g
+s,@exec_prefix@,|#_!!_#|${prefix},g
+s,@prefix@,|#_!!_#|/usr/local,g
+s,@program_transform_name@,|#_!!_#|s\,x\,x\,,g
+s,@bindir@,|#_!!_#|${exec_prefix}/bin,g
+s,@sbindir@,|#_!!_#|${exec_prefix}/sbin,g
+s,@libexecdir@,|#_!!_#|${exec_prefix}/libexec,g
+s,@datarootdir@,|#_!!_#|${prefix}/share,g
+s,@datadir@,|#_!!_#|${datarootdir},g
+s,@sysconfdir@,|#_!!_#|${prefix}/etc,g
+s,@sharedstatedir@,|#_!!_#|${prefix}/com,g
+s,@localstatedir@,|#_!!_#|${prefix}/var,g
+s,@includedir@,|#_!!_#|${prefix}/include,g
+s,@oldincludedir@,|#_!!_#|/usr/include,g
+s,@docdir@,|#_!!_#|${datarootdir}/doc/${PACKAGE},g
+s,@infodir@,|#_!!_#|${datarootdir}/info,g
+s,@htmldir@,|#_!!_#|${docdir},g
+s,@dvidir@,|#_!!_#|${docdir},g
+s,@pdfdir@,|#_!!_#|${docdir},g
+s,@psdir@,|#_!!_#|${docdir},g
+s,@libdir@,|#_!!_#|${exec_prefix}/lib,g
+s,@localedir@,|#_!!_#|${datarootdir}/locale,g
+s,@mandir@,|#_!!_#|${datarootdir}/man,g
+s,@DEFS@,|#_!!_#|-DHAVE_CONFIG_H,g
+s,@ECHO_C@,|#_!!_#|,g
+s,@ECHO_N@,|#_!!_#|-n,g
+s,@ECHO_T@,|#_!!_#|,g
+s,@LIBS@,|#_!!_#|,g
+s,@build_alias@,|#_!!_#|,g
+s,@host_alias@,|#_!!_#|,g
+s,@target_alias@,|#_!!_#|,g
+s,@CC@,|#_!!_#|gcc,g
+s,@CFLAGS@,|#_!!_#|-fPIC -O3 -funroll-all-loops,g
+s,@LDFLAGS@,|#_!!_#|,g
+s,@CPPFLAGS@,|#_!!_#|,g
+s,@ac_ct_CC@,|#_!!_#|gcc,g
+s,@EXEEXT@,|#_!!_#|,g
+s,@OBJEXT@,|#_!!_#|o,g
+s,@RANLIB@,|#_!!_#|ranlib,g
+s,@INSTALL_PROGRAM@,|#_!!_#|${INSTALL},g
+s,@INSTALL_SCRIPT@,|#_!!_#|${INSTALL},g
+s,@INSTALL_DATA@,|#_!!_#|${INSTALL} -m 644,g
+s,@CPP@,|#_!!_#|gcc -E,g
+s,@GREP@,|#_!!_#|/bin/grep,g
+s,@EGREP@,|#_!!_#|/bin/grep -E,g
+s,@LIBOBJS@,|#_!!_#|,g
+s,@LTLIBOBJS@,|#_!!_#|,g
+:end
+s/|#_!!_#|//g
+CEOF
+fi # test -n "$CONFIG_FILES"
+
+
+for ac_tag in :F $CONFIG_FILES :H $CONFIG_HEADERS
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) { { echo "$as_me:$LINENO: error: Invalid tag $ac_tag." >&5
+echo "$as_me: error: Invalid tag $ac_tag." >&2;}
+ { (exit 1); exit 1; }; };;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ { { echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5
+echo "$as_me: error: cannot find input file: $ac_f" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+ ac_file_inputs="$ac_file_inputs $ac_f"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+ # use $as_me), people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input="Generated from "`IFS=:
+ echo $* | sed 's|^[^:]*/||;s|:[^:]*/|, |g'`" by configure."
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { echo "$as_me:$LINENO: creating $ac_file" >&5
+echo "$as_me: creating $ac_file" >&6;}
+ fi
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$tmp/stdin";;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ { as_dir="$ac_dir"
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || { { echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
+echo "$as_me: error: cannot create directory $as_dir" >&2;}
+ { (exit 1); exit 1; }; }; }
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+
+case `sed -n '/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p
+' $ac_file_inputs` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+ ac_datarootdir_hack='
+ s&@datadir@&${datarootdir}&g
+ s&@docdir@&${datarootdir}/doc/${PACKAGE}&g
+ s&@infodir@&${datarootdir}/info&g
+ s&@localedir@&${datarootdir}/locale&g
+ s&@mandir@&${datarootdir}/man&g
+ s&\${datarootdir}&${prefix}/share&g' ;;
+esac
+ sed "/^[ ]*VPATH[ ]*=/{
+s/:*\$(srcdir):*/:/
+s/:*\${srcdir}:*/:/
+s/:*@srcdir@:*/:/
+s/^\([^=]*=[ ]*\):*/\1/
+s/:*$//
+s/^[^=]*=[ ]*$//
+}
+
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s&@configure_input@&$configure_input&;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+$ac_datarootdir_hack
+" $ac_file_inputs | sed -f "$tmp/subs-1.sed" >$tmp/out
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
+ { echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&5
+echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&2;}
+
+ rm -f "$tmp/stdin"
+ case $ac_file in
+ -) cat "$tmp/out"; rm -f "$tmp/out";;
+ *) rm -f "$ac_file"; mv "$tmp/out" $ac_file;;
+ esac
+ ;;
+ :H)
+ #
+ # CONFIG_HEADER
+ #
+ # First, check the format of the line:
+ cat >"$tmp/defines.sed" <<\CEOF
+/^[ ]*#[ ]*undef[ ][ ]*[_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ][_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789]*[ ]*$/b def
+/^[ ]*#[ ]*define[ ][ ]*[_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ][_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789]*[( ]/b def
+b
+:def
+s/$/ /
+s,^\([ #]*\)[^ ]*\([ ]*PACKAGE_NAME\)[ (].*,\1define\2 "" ,
+s,^\([ #]*\)[^ ]*\([ ]*PACKAGE_TARNAME\)[ (].*,\1define\2 "" ,
+s,^\([ #]*\)[^ ]*\([ ]*PACKAGE_VERSION\)[ (].*,\1define\2 "" ,
+s,^\([ #]*\)[^ ]*\([ ]*PACKAGE_STRING\)[ (].*,\1define\2 "" ,
+s,^\([ #]*\)[^ ]*\([ ]*PACKAGE_BUGREPORT\)[ (].*,\1define\2 "" ,
+s,^\([ #]*\)[^ ]*\([ ]*_GNU_SOURCE\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*STDC_HEADERS\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*HAVE_SYS_TYPES_H\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*HAVE_SYS_STAT_H\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*HAVE_STDLIB_H\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*HAVE_STRING_H\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*HAVE_MEMORY_H\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*HAVE_STRINGS_H\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*HAVE_INTTYPES_H\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*HAVE_STDINT_H\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*HAVE_UNISTD_H\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*SIZEOF_SHORT\)[ (].*,\1define\2 2 ,
+s,^\([ #]*\)[^ ]*\([ ]*SIZEOF_INT\)[ (].*,\1define\2 4 ,
+s,^\([ #]*\)[^ ]*\([ ]*SIZEOF_LONG\)[ (].*,\1define\2 8 ,
+s,^\([ #]*\)[^ ]*\([ ]*HAVE_GETOPT_H\)[ (].*,\1define\2 1 ,
+s,^\([ #]*\)[^ ]*\([ ]*HAVE_GETOPT_LONG\)[ (].*,\1define\2 1 ,
+s/ $//
+s,^[ #]*u.*,/* & */,
+CEOF
+ sed -f "$tmp/defines.sed" $ac_file_inputs >"$tmp/out1"
+ac_result="$tmp/out1"
+ if test x"$ac_file" != x-; then
+ echo "/* $configure_input */" >"$tmp/config.h"
+ cat "$ac_result" >>"$tmp/config.h"
+ if diff $ac_file "$tmp/config.h" >/dev/null 2>&1; then
+ { echo "$as_me:$LINENO: $ac_file is unchanged" >&5
+echo "$as_me: $ac_file is unchanged" >&6;}
+ else
+ rm -f $ac_file
+ mv "$tmp/config.h" $ac_file
+ fi
+ else
+ echo "/* $configure_input */"
+ cat "$ac_result"
+ fi
+ rm -f "$tmp/out12"
+ ;;
+
+
+ esac
+
+done # for ac_tag
+
+
+{ (exit 0); exit 0; }
diff --git a/lib/liblzf-3.5/configure b/lib/liblzf-3.5/configure
new file mode 100755
index 0000000000..7a3a2b25c0
--- /dev/null
+++ b/lib/liblzf-3.5/configure
@@ -0,0 +1,7871 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.60.
+#
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+# 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## --------------------- ##
+## M4sh Initialization.  ##
+## --------------------- ##
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+
+# PATH needs CR
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+as_nl='
+'
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ { (exit 1); exit 1; }
+fi
+
+# Work around bugs in pre-3.0 UWIN ksh.
+for as_var in ENV MAIL MAILPATH
+do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# CDPATH.
+$as_unset CDPATH
+
+
+if test "x$CONFIG_SHELL" = x; then
+ if (eval ":") 2>/dev/null; then
+ as_have_required=yes
+else
+ as_have_required=no
+fi
+
+ if test $as_have_required = yes && (eval ":
+(as_func_return () {
+ (exit \$1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test \$exitcode = 0) || { (exit 1); exit 1; }
+
+(
+ as_lineno_1=\$LINENO
+ as_lineno_2=\$LINENO
+ test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" &&
+ test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; }
+") 2> /dev/null; then
+ :
+else
+ as_candidate_shells=
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in /usr/bin/posix$PATH_SEPARATOR/bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ case $as_dir in
+ /*)
+ for as_base in sh bash ksh sh5; do
+ as_candidate_shells="$as_candidate_shells $as_dir/$as_base"
+ done;;
+ esac
+done
+IFS=$as_save_IFS
+
+
+ for as_shell in $as_candidate_shells $SHELL; do
+ # Try only shells that exist, to save several forks.
+ if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+ { ("$as_shell") 2> /dev/null <<\_ASEOF
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+:
+_ASEOF
+}; then
+ CONFIG_SHELL=$as_shell
+ as_have_required=yes
+ if { "$as_shell" 2> /dev/null <<\_ASEOF
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+ # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+:
+(as_func_return () {
+ (exit $1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = "$1" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test $exitcode = 0) || { (exit 1); exit 1; }
+
+(
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; }
+
+_ASEOF
+}; then
+ break
+fi
+
+fi
+
+ done
+
+ if test "x$CONFIG_SHELL" != x; then
+ for as_var in BASH_ENV ENV
+ do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+ done
+ export CONFIG_SHELL
+ exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
+fi
+
+
+ if test $as_have_required = no; then
+ echo This script requires a shell more modern than all the
+ echo shells that I found on your system. Please install a
+ echo modern shell, or manually run the script under such a
+ echo shell if you do have one.
+ { (exit 1); exit 1; }
+fi
+
+
+fi
+
+fi
+
+
+
+(eval "as_func_return () {
+ (exit \$1)
+}
+as_func_success () {
+ as_func_return 0
+}
+as_func_failure () {
+ as_func_return 1
+}
+as_func_ret_success () {
+ return 0
+}
+as_func_ret_failure () {
+ return 1
+}
+
+exitcode=0
+if as_func_success; then
+ :
+else
+ exitcode=1
+ echo as_func_success failed.
+fi
+
+if as_func_failure; then
+ exitcode=1
+ echo as_func_failure succeeded.
+fi
+
+if as_func_ret_success; then
+ :
+else
+ exitcode=1
+ echo as_func_ret_success failed.
+fi
+
+if as_func_ret_failure; then
+ exitcode=1
+ echo as_func_ret_failure succeeded.
+fi
+
+if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
+ :
+else
+ exitcode=1
+ echo positional parameters were not saved.
+fi
+
+test \$exitcode = 0") || {
+ echo No shell found that supports shell functions.
+ echo Please tell autoconf@gnu.org about your system,
+ echo including any error possibly output before this
+ echo message
+}
+
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line after each line using $LINENO; the second 'sed'
+ # does the real work. The second script uses 'N' to pair each
+ # line-number line with the line containing $LINENO, and appends
+ # trailing '-' during substitution so that $LINENO is not a special
+ # case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # scripts with optimization help from Paolo Bonzini. Blame Lee
+ # E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+ # Don't try to exec as it changes $[0], causing all sorts of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in
+-n*)
+ case `echo 'x\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ *) ECHO_C='\c';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir
+fi
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+# Find out whether ``test -x'' works. Don't use a zero-byte file, as
+# systems may use methods other than mode bits to determine executability.
+cat >conf$$.file <<_ASEOF
+#! /bin/sh
+exit 0
+_ASEOF
+chmod +x conf$$.file
+if test -x conf$$.file >/dev/null 2>&1; then
+ as_executable_p="test -x"
+else
+ as_executable_p=:
+fi
+rm -f conf$$.file
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+
+exec 7<&0 </dev/null 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+# Identity of this package.
+PACKAGE_NAME=
+PACKAGE_TARNAME=
+PACKAGE_VERSION=
+PACKAGE_STRING=
+PACKAGE_BUGREPORT=
+
+ac_unique_file="lzfP.h"
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#if HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#if HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#if STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# if HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#if HAVE_STRING_H
+# if !STDC_HEADERS && HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#if HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#if HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#if HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='SHELL
+PATH_SEPARATOR
+PACKAGE_NAME
+PACKAGE_TARNAME
+PACKAGE_VERSION
+PACKAGE_STRING
+PACKAGE_BUGREPORT
+exec_prefix
+prefix
+program_transform_name
+bindir
+sbindir
+libexecdir
+datarootdir
+datadir
+sysconfdir
+sharedstatedir
+localstatedir
+includedir
+oldincludedir
+docdir
+infodir
+htmldir
+dvidir
+pdfdir
+psdir
+libdir
+localedir
+mandir
+DEFS
+ECHO_C
+ECHO_N
+ECHO_T
+LIBS
+build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+CPPFLAGS
+ac_ct_CC
+EXEEXT
+OBJEXT
+RANLIB
+INSTALL_PROGRAM
+INSTALL_SCRIPT
+INSTALL_DATA
+CPP
+GREP
+EGREP
+LIBOBJS
+LTLIBOBJS'
+ac_subst_files=''
+ ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+CPPFLAGS
+CPP'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+ # If the previous option needs an argument, assign it.
+ if test -n "$ac_prev"; then
+ eval $ac_prev=\$ac_option
+ ac_prev=
+ continue
+ fi
+
+ case $ac_option in
+ *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *) ac_optarg=yes ;;
+ esac
+
+ # Accept the important Cygnus configure options, so we can diagnose typos.
+
+ case $ac_dashdash$ac_option in
+ --)
+ ac_dashdash=yes ;;
+
+ -bindir | --bindir | --bindi | --bind | --bin | --bi)
+ ac_prev=bindir ;;
+ -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+ bindir=$ac_optarg ;;
+
+ -build | --build | --buil | --bui | --bu)
+ ac_prev=build_alias ;;
+ -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+ build_alias=$ac_optarg ;;
+
+ -cache-file | --cache-file | --cache-fil | --cache-fi \
+ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+ ac_prev=cache_file ;;
+ -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+ cache_file=$ac_optarg ;;
+
+ --config-cache | -C)
+ cache_file=config.cache ;;
+
+ -datadir | --datadir | --datadi | --datad)
+ ac_prev=datadir ;;
+ -datadir=* | --datadir=* | --datadi=* | --datad=*)
+ datadir=$ac_optarg ;;
+
+ -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+ | --dataroo | --dataro | --datar)
+ ac_prev=datarootdir ;;
+ -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+ datarootdir=$ac_optarg ;;
+
+ -disable-* | --disable-*)
+ ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid feature name: $ac_feature" >&2
+ { (exit 1); exit 1; }; }
+ ac_feature=`echo $ac_feature | sed 's/-/_/g'`
+ eval enable_$ac_feature=no ;;
+
+ -docdir | --docdir | --docdi | --doc | --do)
+ ac_prev=docdir ;;
+ -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+ docdir=$ac_optarg ;;
+
+ -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+ ac_prev=dvidir ;;
+ -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+ dvidir=$ac_optarg ;;
+
+ -enable-* | --enable-*)
+ ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_feature" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid feature name: $ac_feature" >&2
+ { (exit 1); exit 1; }; }
+ ac_feature=`echo $ac_feature | sed 's/-/_/g'`
+ eval enable_$ac_feature=\$ac_optarg ;;
+
+ -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+ | --exec | --exe | --ex)
+ ac_prev=exec_prefix ;;
+ -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+ | --exec=* | --exe=* | --ex=*)
+ exec_prefix=$ac_optarg ;;
+
+ -gas | --gas | --ga | --g)
+ # Obsolete; use --with-gas.
+ with_gas=yes ;;
+
+ -help | --help | --hel | --he | -h)
+ ac_init_help=long ;;
+ -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+ ac_init_help=recursive ;;
+ -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+ ac_init_help=short ;;
+
+ -host | --host | --hos | --ho)
+ ac_prev=host_alias ;;
+ -host=* | --host=* | --hos=* | --ho=*)
+ host_alias=$ac_optarg ;;
+
+ -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+ ac_prev=htmldir ;;
+ -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+ | --ht=*)
+ htmldir=$ac_optarg ;;
+
+ -includedir | --includedir | --includedi | --included | --include \
+ | --includ | --inclu | --incl | --inc)
+ ac_prev=includedir ;;
+ -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+ | --includ=* | --inclu=* | --incl=* | --inc=*)
+ includedir=$ac_optarg ;;
+
+ -infodir | --infodir | --infodi | --infod | --info | --inf)
+ ac_prev=infodir ;;
+ -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+ infodir=$ac_optarg ;;
+
+ -libdir | --libdir | --libdi | --libd)
+ ac_prev=libdir ;;
+ -libdir=* | --libdir=* | --libdi=* | --libd=*)
+ libdir=$ac_optarg ;;
+
+ -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+ | --libexe | --libex | --libe)
+ ac_prev=libexecdir ;;
+ -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+ | --libexe=* | --libex=* | --libe=*)
+ libexecdir=$ac_optarg ;;
+
+ -localedir | --localedir | --localedi | --localed | --locale)
+ ac_prev=localedir ;;
+ -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+ localedir=$ac_optarg ;;
+
+ -localstatedir | --localstatedir | --localstatedi | --localstated \
+ | --localstate | --localstat | --localsta | --localst | --locals)
+ ac_prev=localstatedir ;;
+ -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+ localstatedir=$ac_optarg ;;
+
+ -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+ ac_prev=mandir ;;
+ -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+ mandir=$ac_optarg ;;
+
+ -nfp | --nfp | --nf)
+ # Obsolete; use --without-fp.
+ with_fp=no ;;
+
+ -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+ | --no-cr | --no-c | -n)
+ no_create=yes ;;
+
+ -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+ no_recursion=yes ;;
+
+ -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+ | --oldin | --oldi | --old | --ol | --o)
+ ac_prev=oldincludedir ;;
+ -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+ oldincludedir=$ac_optarg ;;
+
+ -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+ ac_prev=prefix ;;
+ -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+ prefix=$ac_optarg ;;
+
+ -program-prefix | --program-prefix | --program-prefi | --program-pref \
+ | --program-pre | --program-pr | --program-p)
+ ac_prev=program_prefix ;;
+ -program-prefix=* | --program-prefix=* | --program-prefi=* \
+ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+ program_prefix=$ac_optarg ;;
+
+ -program-suffix | --program-suffix | --program-suffi | --program-suff \
+ | --program-suf | --program-su | --program-s)
+ ac_prev=program_suffix ;;
+ -program-suffix=* | --program-suffix=* | --program-suffi=* \
+ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+ program_suffix=$ac_optarg ;;
+
+ -program-transform-name | --program-transform-name \
+ | --program-transform-nam | --program-transform-na \
+ | --program-transform-n | --program-transform- \
+ | --program-transform | --program-transfor \
+ | --program-transfo | --program-transf \
+ | --program-trans | --program-tran \
+ | --progr-tra | --program-tr | --program-t)
+ ac_prev=program_transform_name ;;
+ -program-transform-name=* | --program-transform-name=* \
+ | --program-transform-nam=* | --program-transform-na=* \
+ | --program-transform-n=* | --program-transform-=* \
+ | --program-transform=* | --program-transfor=* \
+ | --program-transfo=* | --program-transf=* \
+ | --program-trans=* | --program-tran=* \
+ | --progr-tra=* | --program-tr=* | --program-t=*)
+ program_transform_name=$ac_optarg ;;
+
+ -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+ ac_prev=pdfdir ;;
+ -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+ pdfdir=$ac_optarg ;;
+
+ -psdir | --psdir | --psdi | --psd | --ps)
+ ac_prev=psdir ;;
+ -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+ psdir=$ac_optarg ;;
+
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ silent=yes ;;
+
+ -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+ ac_prev=sbindir ;;
+ -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+ | --sbi=* | --sb=*)
+ sbindir=$ac_optarg ;;
+
+ -sharedstatedir | --sharedstatedir | --sharedstatedi \
+ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+ | --sharedst | --shareds | --shared | --share | --shar \
+ | --sha | --sh)
+ ac_prev=sharedstatedir ;;
+ -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+ | --sha=* | --sh=*)
+ sharedstatedir=$ac_optarg ;;
+
+ -site | --site | --sit)
+ ac_prev=site ;;
+ -site=* | --site=* | --sit=*)
+ site=$ac_optarg ;;
+
+ -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+ ac_prev=srcdir ;;
+ -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+ srcdir=$ac_optarg ;;
+
+ -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+ | --syscon | --sysco | --sysc | --sys | --sy)
+ ac_prev=sysconfdir ;;
+ -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+ sysconfdir=$ac_optarg ;;
+
+ -target | --target | --targe | --targ | --tar | --ta | --t)
+ ac_prev=target_alias ;;
+ -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+ target_alias=$ac_optarg ;;
+
+ -v | -verbose | --verbose | --verbos | --verbo | --verb)
+ verbose=yes ;;
+
+ -version | --version | --versio | --versi | --vers | -V)
+ ac_init_version=: ;;
+
+ -with-* | --with-*)
+ ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid package name: $ac_package" >&2
+ { (exit 1); exit 1; }; }
+ ac_package=`echo $ac_package| sed 's/-/_/g'`
+ eval with_$ac_package=\$ac_optarg ;;
+
+ -without-* | --without-*)
+ ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_package" : ".*[^-_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid package name: $ac_package" >&2
+ { (exit 1); exit 1; }; }
+ ac_package=`echo $ac_package | sed 's/-/_/g'`
+ eval with_$ac_package=no ;;
+
+ --x)
+ # Obsolete; use --with-x.
+ with_x=yes ;;
+
+ -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+ | --x-incl | --x-inc | --x-in | --x-i)
+ ac_prev=x_includes ;;
+ -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+ x_includes=$ac_optarg ;;
+
+ -x-libraries | --x-libraries | --x-librarie | --x-librari \
+ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+ ac_prev=x_libraries ;;
+ -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+ x_libraries=$ac_optarg ;;
+
+ -*) { echo "$as_me: error: unrecognized option: $ac_option
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; }
+ ;;
+
+ *=*)
+ ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+ # Reject names that are not valid shell variable names.
+ expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null &&
+ { echo "$as_me: error: invalid variable name: $ac_envvar" >&2
+ { (exit 1); exit 1; }; }
+ eval $ac_envvar=\$ac_optarg
+ export $ac_envvar ;;
+
+ *)
+ # FIXME: should be removed in autoconf 3.0.
+ echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+ expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+ echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+ : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+ ;;
+
+ esac
+done
+
+if test -n "$ac_prev"; then
+ ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+ { echo "$as_me: error: missing argument to $ac_option" >&2
+ { (exit 1); exit 1; }; }
+fi
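+# Annotation (illustrative only, not emitted by Autoconf): the case
+# statement above accepts any unambiguous prefix of an option name and
+# rewrites --with-PKG / --enable-FEATURE into shell variables, e.g.
+#   ./configure --pref=/usr/local     # same as --prefix=/usr/local
+#   ./configure --with-foo-bar        # hypothetical pkg: with_foo_bar=yes
+#   ./configure CC=gcc CFLAGS=-O2     # *=* branch exports CC and CFLAGS
+#   ./configure --prefix              # error: missing argument to --prefix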
+
+# Be sure to have absolute directory names.
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
+ datadir sysconfdir sharedstatedir localstatedir includedir \
+ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+ libdir localedir mandir
+do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ [\\/$]* | ?:[\\/]* ) continue;;
+ NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+ esac
+ { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
+ { (exit 1); exit 1; }; }
+done
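+# Annotation (illustrative): each directory variable checked above must be
+# absolute (or a DOS-style drive path); NONE/'' is tolerated only for the
+# two prefixes. A relative value is rejected immediately:
+#   ./configure --bindir=build/bin
+#   # error: expected an absolute directory name for --bindir: build/bin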
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+ if test "x$build_alias" = x; then
+ cross_compiling=maybe
+ echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
+ If a cross compiler is detected then cross compile mode will be used." >&2
+ elif test "x$build_alias" != "x$host_alias"; then
+ cross_compiling=yes
+ fi
+fi
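+# Annotation (sketch, assuming conventional GNU triplets): how the aliases
+# combine into $cross_compiling:
+#   ./configure --host=arm-linux-gnueabi                          # maybe
+#   ./configure --build=x86_64-linux-gnu --host=arm-linux-gnueabi # yes
+# "maybe" is settled further down, when configure tries to execute a
+# freshly compiled test program.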
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+ { echo "$as_me: error: Working directory cannot be determined" >&2
+ { (exit 1); exit 1; }; }
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+ { echo "$as_me: error: pwd does not report name of working directory" >&2
+ { (exit 1); exit 1; }; }
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+ ac_srcdir_defaulted=yes
+ # Try the directory containing this script, then the parent directory.
+ ac_confdir=`$as_dirname -- "$0" ||
+$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$0" : 'X\(//\)[^/]' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+echo X"$0" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ srcdir=$ac_confdir
+ if test ! -r "$srcdir/$ac_unique_file"; then
+ srcdir=..
+ fi
+else
+ ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+ test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+ { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2
+ { (exit 1); exit 1; }; }
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+ cd "$srcdir" && test -r "./$ac_unique_file" || { echo "$as_me: error: $ac_msg" >&2
+ { (exit 1); exit 1; }; }
+ pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+ srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
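+# Annotation (illustrative): how $srcdir ends up being chosen:
+#   ./configure                      # srcdir = directory holding configure
+#   mkdir build && cd build && ../configure   # srcdir=.. (VPATH build)
+#   ./configure --srcdir=/path/to/src         # explicit override
+# The trailing-slash strip above keeps "//" out of debug-info file names.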
+for ac_var in $ac_precious_vars; do
+ eval ac_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_env_${ac_var}_value=\$${ac_var}
+ eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+ eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+ # Omit some internal or obsolete options to make the list less imposing.
+ # This message is too long to be a string in the A/UX 3.1 sh.
+ cat <<_ACEOF
+\`configure' configures this package to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE. See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+ -h, --help display this help and exit
+ --help=short display options specific to this package
+ --help=recursive display the short help of all the included packages
+ -V, --version display version information and exit
+ -q, --quiet, --silent do not print \`checking...' messages
+ --cache-file=FILE cache test results in FILE [disabled]
+ -C, --config-cache alias for \`--cache-file=config.cache'
+ -n, --no-create do not create output files
+ --srcdir=DIR find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+ --prefix=PREFIX install architecture-independent files in PREFIX
+ [$ac_default_prefix]
+ --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX
+ [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+ --bindir=DIR user executables [EPREFIX/bin]
+ --sbindir=DIR system admin executables [EPREFIX/sbin]
+ --libexecdir=DIR program executables [EPREFIX/libexec]
+ --sysconfdir=DIR read-only single-machine data [PREFIX/etc]
+ --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
+ --localstatedir=DIR modifiable single-machine data [PREFIX/var]
+ --libdir=DIR object code libraries [EPREFIX/lib]
+ --includedir=DIR C header files [PREFIX/include]
+ --oldincludedir=DIR C header files for non-gcc [/usr/include]
+ --datarootdir=DIR read-only arch.-independent data root [PREFIX/share]
+ --datadir=DIR read-only architecture-independent data [DATAROOTDIR]
+ --infodir=DIR info documentation [DATAROOTDIR/info]
+ --localedir=DIR locale-dependent data [DATAROOTDIR/locale]
+ --mandir=DIR man documentation [DATAROOTDIR/man]
+ --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE]
+ --htmldir=DIR html documentation [DOCDIR]
+ --dvidir=DIR dvi documentation [DOCDIR]
+ --pdfdir=DIR pdf documentation [DOCDIR]
+ --psdir=DIR ps documentation [DOCDIR]
+_ACEOF
+
+ cat <<\_ACEOF
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+
+ cat <<\_ACEOF
+
+Optional Features:
+ --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
+ --enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --disable-largefile omit support for large files
+
+Some influential environment variables:
+ CC C compiler command
+ CFLAGS C compiler flags
+ LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a
+ nonstandard directory <lib dir>
+ CPPFLAGS C/C++/Objective C preprocessor flags, e.g. -I<include dir> if
+ you have headers in a nonstandard directory <include dir>
+ CPP C preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+ # If there are subdirs, report their specific --help.
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+ test -d "$ac_dir" || continue
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+ cd "$ac_dir" || { ac_status=$?; continue; }
+ # Check for guested configure.
+ if test -f "$ac_srcdir/configure.gnu"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+ elif test -f "$ac_srcdir/configure"; then
+ echo &&
+ $SHELL "$ac_srcdir/configure" --help=recursive
+ else
+ echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+ fi || ac_status=$?
+ cd "$ac_pwd" || { ac_status=$?; break; }
+ done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+ cat <<\_ACEOF
+configure
+generated by GNU Autoconf 2.60
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+ exit
+fi
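+# Annotation: the three informational modes dispatched above, for reference:
+#   ./configure --help             # long option summary printed earlier
+#   ./configure --help=recursive   # also queries each subdirectory's configure
+#   ./configure --version          # "generated by GNU Autoconf 2.60"
+# All three exit before any compiler test runs.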
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by $as_me, which was
+generated by GNU Autoconf 2.60. Invocation command line was
+
+ $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown`
+
+/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown`
+/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown`
+/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ echo "PATH: $as_dir"
+done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+ for ac_arg
+ do
+ case $ac_arg in
+ -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil)
+ continue ;;
+ *\'*)
+ ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ esac
+ case $ac_pass in
+ 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;;
+ 2)
+ ac_configure_args1="$ac_configure_args1 '$ac_arg'"
+ if test $ac_must_keep_next = true; then
+ ac_must_keep_next=false # Got value, back to normal.
+ else
+ case $ac_arg in
+ *=* | --config-cache | -C | -disable-* | --disable-* \
+ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+ | -with-* | --with-* | -without-* | --without-* | --x)
+ case "$ac_configure_args0 " in
+ "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+ esac
+ ;;
+ -* ) ac_must_keep_next=true ;;
+ esac
+ fi
+ ac_configure_args="$ac_configure_args '$ac_arg'"
+ ;;
+ esac
+ done
+done
+$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; }
+$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; }
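+# Annotation (not generated code): pass 1 records every argument; pass 2
+# drops a self-contained argument that reoccurs later, so
+#   ./configure --prefix=/opt --prefix=/opt
+# is replayed in config.log as a single '--prefix=/opt'. A value that
+# follows a separate "-opt value" pair is always kept via $ac_must_keep_next.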
+
+# When interrupted or exited, clean up temporary files and complete
+# config.log. We strip comments because the quotes they may contain
+# would otherwise cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+ # Save into config.log some information that might help in debugging.
+ {
+ echo
+
+ cat <<\_ASBOX
+## ---------------- ##
+## Cache variables. ##
+## ---------------- ##
+_ASBOX
+ echo
+ # The following way of writing the cache mishandles newlines in values.
+(
+ for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5
+echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ *) $as_unset $ac_var ;;
+ esac ;;
+ esac
+ done
+ (set) 2>&1 |
+ case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ sed -n \
+ "s/'\''/'\''\\\\'\'''\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+ ;; #(
+ *)
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+)
+ echo
+
+ cat <<\_ASBOX
+## ----------------- ##
+## Output variables. ##
+## ----------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_vars
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+
+ if test -n "$ac_subst_files"; then
+ cat <<\_ASBOX
+## ------------------- ##
+## File substitutions. ##
+## ------------------- ##
+_ASBOX
+ echo
+ for ac_var in $ac_subst_files
+ do
+ eval ac_val=\$$ac_var
+ case $ac_val in
+ *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ esac
+ echo "$ac_var='\''$ac_val'\''"
+ done | sort
+ echo
+ fi
+
+ if test -s confdefs.h; then
+ cat <<\_ASBOX
+## ----------- ##
+## confdefs.h. ##
+## ----------- ##
+_ASBOX
+ echo
+ cat confdefs.h
+ echo
+ fi
+ test "$ac_signal" != 0 &&
+ echo "$as_me: caught signal $ac_signal"
+ echo "$as_me: exit $exit_status"
+ } >&5
+ rm -f core *.core core.conftest.* &&
+ rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+ exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+ trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal
+done
+ac_signal=0
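+# Annotation: on exit, or on signals 1/2/13/15 (HUP, INT, PIPE, TERM), the
+# trap above appends a post-mortem to config.log -- cache variables, output
+# variables, file substitutions, confdefs.h -- then deletes conftest* litter.
+# One way to inspect a failed run afterwards:
+#   sed -n '/Cache variables/,/Output variables/p' config.log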
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
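+# Annotation: after the five appends above, confdefs.h starts out roughly as
+# (values come from AC_INIT; placeholders shown here):
+#   #define PACKAGE_NAME "..."
+#   #define PACKAGE_TARNAME "..."
+#   #define PACKAGE_VERSION "..."
+#   #define PACKAGE_STRING "..."
+#   #define PACKAGE_BUGREPORT "..."
+# and is prepended to every conftest program compiled below.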
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer explicitly selected file to automatically selected ones.
+if test -n "$CONFIG_SITE"; then
+ set x "$CONFIG_SITE"
+elif test "x$prefix" != xNONE; then
+ set x "$prefix/share/config.site" "$prefix/etc/config.site"
+else
+ set x "$ac_default_prefix/share/config.site" \
+ "$ac_default_prefix/etc/config.site"
+fi
+shift
+for ac_site_file
+do
+ if test -r "$ac_site_file"; then
+ { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5
+echo "$as_me: loading site script $ac_site_file" >&6;}
+ sed 's/^/| /' "$ac_site_file" >&5
+ . "$ac_site_file"
+ fi
+done
+
+if test -r "$cache_file"; then
+ # Some versions of bash will fail to source /dev/null (special
+ # files actually), so we avoid doing that.
+ if test -f "$cache_file"; then
+ { echo "$as_me:$LINENO: loading cache $cache_file" >&5
+echo "$as_me: loading cache $cache_file" >&6;}
+ case $cache_file in
+ [\\/]* | ?:[\\/]* ) . "$cache_file";;
+ *) . "./$cache_file";;
+ esac
+ fi
+else
+ { echo "$as_me:$LINENO: creating cache $cache_file" >&5
+echo "$as_me: creating cache $cache_file" >&6;}
+ >$cache_file
+fi
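+# Annotation (usage sketch): precomputed answers can be injected through a
+# site script or a cache file, for example:
+#   CONFIG_SITE=$HOME/config.site ./configure
+#   ./configure --cache-file=config.cache    # or the -C shorthand
+# Both are sourced as shell code; an absent cache file is created empty.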
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+ eval ac_old_set=\$ac_cv_env_${ac_var}_set
+ eval ac_new_set=\$ac_env_${ac_var}_set
+ eval ac_old_val=\$ac_cv_env_${ac_var}_value
+ eval ac_new_val=\$ac_env_${ac_var}_value
+ case $ac_old_set,$ac_new_set in
+ set,)
+ { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,set)
+ { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5
+echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+ ac_cache_corrupted=: ;;
+ ,);;
+ *)
+ if test "x$ac_old_val" != "x$ac_new_val"; then
+ { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5
+echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+ { echo "$as_me:$LINENO: former value: $ac_old_val" >&5
+echo "$as_me: former value: $ac_old_val" >&2;}
+ { echo "$as_me:$LINENO: current value: $ac_new_val" >&5
+echo "$as_me: current value: $ac_new_val" >&2;}
+ ac_cache_corrupted=:
+ fi;;
+ esac
+ # Pass precious variables to config.status.
+ if test "$ac_new_set" = set; then
+ case $ac_new_val in
+ *\'*) ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+ *) ac_arg=$ac_var=$ac_new_val ;;
+ esac
+ case " $ac_configure_args " in
+ *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy.
+ *) ac_configure_args="$ac_configure_args '$ac_arg'" ;;
+ esac
+ fi
+done
+if $ac_cache_corrupted; then
+ { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5
+echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+ { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5
+echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;}
+ { (exit 1); exit 1; }; }
+fi
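+# Annotation: the loop above compares the "precious" variables (CC, CFLAGS,
+# CPPFLAGS, ... -- see the --help text) against the cached run, so
+#   CC=gcc ./configure -C && CC=icc ./configure -C   # icc: any other compiler
+# aborts with "`CC' has changed since the previous run" until the cache is
+# removed (rm config.cache) or `make distclean' is run.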
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+
+ac_config_headers="$ac_config_headers config.h"
+
+
+
+cat >>confdefs.h <<\_ACEOF
+#define _GNU_SOURCE 1
+_ACEOF
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}gcc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="gcc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+echo "${ECHO_T}$ac_ct_CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&5
+echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}cc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+ fi
+fi
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# != 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+ fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in cl.exe
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cl.exe
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+echo "${ECHO_T}$ac_ct_CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CC" && break
+done
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&5
+echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+fi
+
+fi
+
+
+test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&5
+echo "$as_me: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
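+# Annotation: the search above tries, in order, ${host_alias}-gcc, gcc,
+# ${host_alias}-cc, cc (rejecting /usr/ucb/cc), then cl.exe, caching the
+# winner in ac_cv_prog_CC. The whole search can be pre-empted from the
+# command line, e.g.:
+#   ./configure CC=clang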
+
+# Provide some information about the compiler.
+echo "$as_me:$LINENO: checking for C compiler version" >&5
+ac_compiler=`set X $ac_compile; echo $2`
+{ (ac_try="$ac_compiler --version >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compiler --version >&5") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -v >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compiler -v >&5") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -V >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compiler -V >&5") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.exe b.out"
+# Try to create an executable without -o first, disregarding a.out.
+# This helps diagnose broken compilers and gives a first guess at the
+# executable suffix (exeext).
+{ echo "$as_me:$LINENO: checking for C compiler default output file name" >&5
+echo $ECHO_N "checking for C compiler default output file name... $ECHO_C" >&6; }
+ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+#
+# List of possible output files, starting from the most likely.
+# The algorithm is not robust to junk in `.', hence go to wildcards (a.*)
+# only as a last resort. b.out is created by i960 compilers.
+ac_files='a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out'
+#
+# The IRIX 6 linker writes into existing files which may not be
+# executable, retaining their permissions. Remove them first so a
+# subsequent execution test works.
+ac_rmfiles=
+for ac_file in $ac_files
+do
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;;
+ * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+ esac
+done
+rm -f $ac_rmfiles
+
+if { (ac_try="$ac_link_default"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_link_default") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile. We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files
+do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj )
+ ;;
+ [ab].out )
+ # We found the default executable, but exeext='' is most
+ # certainly right.
+ break;;
+ *.* )
+ if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+ then :; else
+ ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ fi
+ # We set ac_cv_exeext here because the later test for it is not
+ # safe: cross compilers may not add the suffix if given an `-o'
+ # argument, so we may need to know it at that point already.
+ # Even if this section looks crufty: it has the advantage of
+ # actually working.
+ break;;
+ * )
+ break;;
+ esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { echo "$as_me:$LINENO: error: C compiler cannot create executables
+See \`config.log' for more details." >&5
+echo "$as_me: error: C compiler cannot create executables
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; }
+fi
+
+ac_exeext=$ac_cv_exeext
+{ echo "$as_me:$LINENO: result: $ac_file" >&5
+echo "${ECHO_T}$ac_file" >&6; }
+
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ echo "$as_me:$LINENO: checking whether the C compiler works" >&5
+echo $ECHO_N "checking whether the C compiler works... $ECHO_C" >&6; }
+# FIXME: These cross compiler hacks should be removed for Autoconf 3.0
+# If not cross compiling, check that we can run a simple program.
+if test "$cross_compiling" != yes; then
+ if { ac_try='./$ac_file'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ cross_compiling=no
+ else
+ if test "$cross_compiling" = maybe; then
+ cross_compiling=yes
+ else
+ { { echo "$as_me:$LINENO: error: cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ fi
+fi
+{ echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6; }
+
+rm -f a.out a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+# Check that the compiler produces executables we can run. If not, either
+# the compiler is broken, or we cross compile.
+{ echo "$as_me:$LINENO: checking whether we are cross compiling" >&5
+echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6; }
+{ echo "$as_me:$LINENO: result: $cross_compiling" >&5
+echo "${ECHO_T}$cross_compiling" >&6; }
+
+{ echo "$as_me:$LINENO: checking for suffix of executables" >&5
+echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6; }
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+ test -f "$ac_file" || continue
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;;
+ *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ break;;
+ * ) break;;
+ esac
+done
+else
+ { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+rm -f conftest$ac_cv_exeext
+{ echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5
+echo "${ECHO_T}$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+{ echo "$as_me:$LINENO: checking for suffix of object files" >&5
+echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6; }
+if test "${ac_cv_objext+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; then
+ for ac_file in conftest.o conftest.obj conftest.*; do
+ test -f "$ac_file" || continue;
+ case $ac_file in
+ *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf ) ;;
+ *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+ break;;
+ esac
+done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_objext" >&5
+echo "${ECHO_T}$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
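+# Annotation (illustrative platform values, not checked here):
+#   Linux/gcc:      EXEEXT=''     OBJEXT='o'
+#   Cygwin/MinGW:   EXEEXT='.exe' OBJEXT='o'
+# Generated makefiles refer to build products as prog$(EXEEXT) and
+# name.$(OBJEXT) so the same rules work on both kinds of system.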
+{ echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5
+echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6; }
+if test "${ac_cv_c_compiler_gnu+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_compiler_gnu=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_compiler_gnu=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5
+echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6; }
+GCC=`test $ac_compiler_gnu = yes && echo yes`
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5
+echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6; }
+if test "${ac_cv_prog_cc_g+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cc_g=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ CFLAGS=""
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cc_g=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5
+echo "${ECHO_T}$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
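+# Annotation: net effect of the -g probe when the user did not set CFLAGS:
+#   GCC and -g works:     CFLAGS='-g -O2'
+#   GCC, -g unusable:     CFLAGS='-O2'
+#   other cc, -g works:   CFLAGS='-g'
+#   other cc, no -g:      CFLAGS=''
+# A CFLAGS value passed on the command line is restored untouched.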
+{ echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5
+echo $ECHO_N "checking for $CC option to accept ISO C89... $ECHO_C" >&6; }
+if test "${ac_cv_prog_cc_c89+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cc_c89=$ac_arg
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+ x)
+ { echo "$as_me:$LINENO: result: none needed" >&5
+echo "${ECHO_T}none needed" >&6; } ;;
+ xno)
+ { echo "$as_me:$LINENO: result: unsupported" >&5
+echo "${ECHO_T}unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c89"
+ { echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5
+echo "${ECHO_T}$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
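+# Annotation: the C89 probe compiles one deliberately tricky test program
+# with each candidate flag in turn ('' first, then -qlanglvl=extc89,
+# -qlanglvl=ansi, -std, -Ae, "-Aa -D_HPUX_SOURCE", "-Xc -D__EXTENSIONS__")
+# and appends the first flag that works to $CC; "none needed" and
+# "unsupported" are the two degenerate outcomes reported above.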
+
+
+# Check whether --enable-largefile was given.
+if test "${enable_largefile+set}" = set; then
+ enableval=$enable_largefile;
+fi
+
+if test "$enable_largefile" != no; then
+
+ { echo "$as_me:$LINENO: checking for special C compiler options needed for large files" >&5
+echo $ECHO_N "checking for special C compiler options needed for large files... $ECHO_C" >&6; }
+if test "${ac_cv_sys_largefile_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_sys_largefile_CC=no
+ if test "$GCC" != yes; then
+ ac_save_CC=$CC
+ while :; do
+ # IRIX 6.2 and later do not support large files by default,
+ # so use the C compiler's -n32 option if that helps.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext
+ CC="$CC -n32"
+ rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sys_largefile_CC=' -n32'; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext
+ break
+ done
+ CC=$ac_save_CC
+ rm -f conftest.$ac_ext
+ fi
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_sys_largefile_CC" >&5
+echo "${ECHO_T}$ac_cv_sys_largefile_CC" >&6; }
+ if test "$ac_cv_sys_largefile_CC" != no; then
+ CC=$CC$ac_cv_sys_largefile_CC
+ fi
+
+ { echo "$as_me:$LINENO: checking for _FILE_OFFSET_BITS value needed for large files" >&5
+echo $ECHO_N "checking for _FILE_OFFSET_BITS value needed for large files... $ECHO_C" >&6; }
+if test "${ac_cv_sys_file_offset_bits+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ while :; do
+ ac_cv_sys_file_offset_bits=no
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#define _FILE_OFFSET_BITS 64
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sys_file_offset_bits=64; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ break
+done
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_sys_file_offset_bits" >&5
+echo "${ECHO_T}$ac_cv_sys_file_offset_bits" >&6; }
+if test "$ac_cv_sys_file_offset_bits" != no; then
+
+cat >>confdefs.h <<_ACEOF
+#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits
+_ACEOF
+
+fi
+rm -f conftest*
+ { echo "$as_me:$LINENO: checking for _LARGE_FILES value needed for large files" >&5
+echo $ECHO_N "checking for _LARGE_FILES value needed for large files... $ECHO_C" >&6; }
+if test "${ac_cv_sys_large_files+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ while :; do
+ ac_cv_sys_large_files=no
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#define _LARGE_FILES 1
+#include <sys/types.h>
+ /* Check that off_t can represent 2**63 - 1 correctly.
+ We can't simply define LARGE_OFF_T to be 9223372036854775807,
+ since some C++ compilers masquerading as C compilers
+ incorrectly reject 9223372036854775807. */
+#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+ int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ && LARGE_OFF_T % 2147483647 == 1)
+ ? 1 : -1];
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sys_large_files=1; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ break
+done
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_sys_large_files" >&5
+echo "${ECHO_T}$ac_cv_sys_large_files" >&6; }
+if test "$ac_cv_sys_large_files" != no; then
+
+cat >>confdefs.h <<_ACEOF
+#define _LARGE_FILES $ac_cv_sys_large_files
+_ACEOF
+
+fi
+rm -f conftest*
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}gcc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+ ac_ct_CC=$CC
+ # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="gcc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+echo "${ECHO_T}$ac_ct_CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&5
+echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+else
+ CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="${ac_tool_prefix}cc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+ fi
+fi
+if test -z "$CC"; then
+ # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+ ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+ ac_prog_rejected=yes
+ continue
+ fi
+ ac_cv_prog_CC="cc"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+ # We found a bogon in the path, so make sure we never use it.
+ set dummy $ac_cv_prog_CC
+ shift
+ if test $# != 0; then
+ # We chose a different compiler from the bogus one.
+ # However, it has the same basename, so the bogon will be chosen
+ # first if we set CC to just the basename; use the full file name.
+ shift
+ ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+ fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in cl.exe
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$CC"; then
+ ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+ { echo "$as_me:$LINENO: result: $CC" >&5
+echo "${ECHO_T}$CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+ test -n "$CC" && break
+ done
+fi
+if test -z "$CC"; then
+ ac_ct_CC=$CC
+ for ac_prog in cl.exe
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_CC"; then
+ ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_CC="$ac_prog"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+ { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
+echo "${ECHO_T}$ac_ct_CC" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CC" && break
+done
+
+ if test "x$ac_ct_CC" = x; then
+ CC=""
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&5
+echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&2;}
+ac_tool_warned=yes ;;
+esac
+ CC=$ac_ct_CC
+ fi
+fi
+
+fi
+
+
+test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&5
+echo "$as_me: error: no acceptable C compiler found in \$PATH
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+
+# Provide some information about the compiler.
+echo "$as_me:$LINENO: checking for C compiler version" >&5
+ac_compiler=`set X $ac_compile; echo $2`
+{ (ac_try="$ac_compiler --version >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compiler --version >&5") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -v >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compiler -v >&5") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+{ (ac_try="$ac_compiler -V >&5"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compiler -V >&5") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }
+
+{ echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5
+echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6; }
+if test "${ac_cv_c_compiler_gnu+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+#ifndef __GNUC__
+ choke me
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_compiler_gnu=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_compiler_gnu=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5
+echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6; }
+GCC=`test $ac_compiler_gnu = yes && echo yes`
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5
+echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6; }
+if test "${ac_cv_prog_cc_g+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_save_c_werror_flag=$ac_c_werror_flag
+ ac_c_werror_flag=yes
+ ac_cv_prog_cc_g=no
+ CFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cc_g=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ CFLAGS=""
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_c_werror_flag=$ac_save_c_werror_flag
+ CFLAGS="-g"
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cc_g=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5
+echo "${ECHO_T}$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+ CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+ if test "$GCC" = yes; then
+ CFLAGS="-g -O2"
+ else
+ CFLAGS="-g"
+ fi
+else
+ if test "$GCC" = yes; then
+ CFLAGS="-O2"
+ else
+ CFLAGS=
+ fi
+fi
+{ echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5
+echo $ECHO_N "checking for $CC option to accept ISO C89... $ECHO_C" >&6; }
+if test "${ac_cv_prog_cc_c89+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+ char **p;
+ int i;
+{
+ return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+ char *s;
+ va_list v;
+ va_start (v,p);
+ s = g (p, va_arg (v,int));
+ va_end (v);
+ return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has
+ function prototypes and stuff, but not '\xHH' hex character constants.
+ These don't provoke an error unfortunately, instead are silently treated
+ as 'x'. The following induces an error, until -std is added to get
+ proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an
+ array size at least. It's necessary to write '\x00'==0 to get something
+ that's true only with -std. */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+ inside strings and character constants. */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1];
+ ;
+ return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+ CC="$ac_save_CC $ac_arg"
+ rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_prog_cc_c89=$ac_arg
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext
+ test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+ x)
+ { echo "$as_me:$LINENO: result: none needed" >&5
+echo "${ECHO_T}none needed" >&6; } ;;
+ xno)
+ { echo "$as_me:$LINENO: result: unsupported" >&5
+echo "${ECHO_T}unsupported" >&6; } ;;
+ *)
+ CC="$CC $ac_cv_prog_cc_c89"
+ { echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5
+echo "${ECHO_T}$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ranlib; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_RANLIB+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$RANLIB"; then
+ ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+RANLIB=$ac_cv_prog_RANLIB
+if test -n "$RANLIB"; then
+ { echo "$as_me:$LINENO: result: $RANLIB" >&5
+echo "${ECHO_T}$RANLIB" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_RANLIB"; then
+ ac_ct_RANLIB=$RANLIB
+ # Extract the first word of "ranlib", so it can be a program name with args.
+set dummy ranlib; ac_word=$2
+{ echo "$as_me:$LINENO: checking for $ac_word" >&5
+echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
+if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test -n "$ac_ct_RANLIB"; then
+ ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_executable_p "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_prog_ac_ct_RANLIB="ranlib"
+ echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
+if test -n "$ac_ct_RANLIB"; then
+ { echo "$as_me:$LINENO: result: $ac_ct_RANLIB" >&5
+echo "${ECHO_T}$ac_ct_RANLIB" >&6; }
+else
+ { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+fi
+
+ if test "x$ac_ct_RANLIB" = x; then
+ RANLIB=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&5
+echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools
+whose name does not start with the host triplet. If you think this
+configuration is useful to you, please write to autoconf@gnu.org." >&2;}
+ac_tool_warned=yes ;;
+esac
+ RANLIB=$ac_ct_RANLIB
+ fi
+else
+ RANLIB="$ac_cv_prog_RANLIB"
+fi
+
+ac_aux_dir=
+for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ { { echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&5
+echo "$as_me: error: cannot find install-sh or install.sh in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
+
+
+# Find a good install program. We prefer a C program (faster),
+# so one script is as good as another. But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has a completely different semantic
+# ./install, which can be erroneously created by make from ./install.sh.
+{ echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5
+echo $ECHO_N "checking for a BSD-compatible install... $ECHO_C" >&6; }
+if test -z "$INSTALL"; then
+if test "${ac_cv_path_install+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in
+ ./ | .// | /cC/* | \
+ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+ ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \
+ /usr/ucb/* ) ;;
+ *)
+ # OSF1 and SCO ODT 3.0 have their own names for install.
+ # Don't use installbsd from OSF since it installs stuff as root
+ # by default.
+ for ac_prog in ginstall scoinst install; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_executable_p "$as_dir/$ac_prog$ac_exec_ext"; }; then
+ if test $ac_prog = install &&
+ grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # AIX install. It has an incompatible calling convention.
+ :
+ elif test $ac_prog = install &&
+ grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ # program-specific install script used by HP pwplus--don't use.
+ :
+ else
+ ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ break 3
+ fi
+ fi
+ done
+ done
+ ;;
+esac
+done
+IFS=$as_save_IFS
+
+
+fi
+ if test "${ac_cv_path_install+set}" = set; then
+ INSTALL=$ac_cv_path_install
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for INSTALL within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ INSTALL=$ac_install_sh
+ fi
+fi
+{ echo "$as_me:$LINENO: result: $INSTALL" >&5
+echo "${ECHO_T}$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5
+echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6; }
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+ CPP=
+fi
+if test -z "$CPP"; then
+ if test "${ac_cv_prog_CPP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ # Double quotes because CPP needs to be expanded
+ for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+ do
+ ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ ac_cpp_err=$ac_cpp_err$ac_c_werror_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ ac_cpp_err=$ac_cpp_err$ac_c_werror_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ # Broken: success on invalid input.
+continue
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ break
+fi
+
+ done
+ ac_cv_prog_CPP=$CPP
+
+fi
+ CPP=$ac_cv_prog_CPP
+else
+ ac_cv_prog_CPP=$CPP
+fi
+{ echo "$as_me:$LINENO: result: $CPP" >&5
+echo "${ECHO_T}$CPP" >&6; }
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+ # Use a header file that comes with gcc, so configuring glibc
+ # with a fresh cross-compiler works.
+ # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ # <limits.h> exists even on freestanding compilers.
+ # On the NeXT, cc -E runs the code through the compiler's parser,
+ # not just through cpp. "Syntax error" is here to catch this case.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+ Syntax error
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ ac_cpp_err=$ac_cpp_err$ac_c_werror_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ :
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Broken: fails on valid input.
+continue
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+ # OK, works on sane cases. Now check whether nonexistent headers
+ # can be detected and how.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ ac_cpp_err=$ac_cpp_err$ac_c_werror_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ # Broken: success on invalid input.
+continue
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then
+ :
+else
+ { { echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&5
+echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+{ echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5
+echo $ECHO_N "checking for grep that handles long lines and -e... $ECHO_C" >&6; }
+if test "${ac_cv_path_GREP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ # Extract the first word of "grep ggrep" to use in msg output
+if test -z "$GREP"; then
+set dummy grep ggrep; ac_prog_name=$2
+if test "${ac_cv_path_GREP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_path_GREP_found=false
+# Loop through the user's path and test for each of PROGNAME-LIST
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in grep ggrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_GREP" && $as_executable_p "$ac_path_GREP"; } || continue
+ # Check for GNU ac_path_GREP and select it if it is found.
+ # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+ ac_count=0
+ echo $ECHO_N "0123456789$ECHO_C" >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ echo 'GREP' >> "conftest.nl"
+ "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ ac_count=`expr $ac_count + 1`
+ if test $ac_count -gt ${ac_path_GREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_GREP="$ac_path_GREP"
+ ac_path_GREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+
+ $ac_path_GREP_found && break 3
+ done
+done
+
+done
+IFS=$as_save_IFS
+
+
+fi
+
+GREP="$ac_cv_path_GREP"
+if test -z "$GREP"; then
+ { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5
+echo "$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+else
+ ac_cv_path_GREP=$GREP
+fi
+
+
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5
+echo "${ECHO_T}$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ echo "$as_me:$LINENO: checking for egrep" >&5
+echo $ECHO_N "checking for egrep... $ECHO_C" >&6; }
+if test "${ac_cv_path_EGREP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+ then ac_cv_path_EGREP="$GREP -E"
+ else
+ # Extract the first word of "egrep" to use in msg output
+if test -z "$EGREP"; then
+set dummy egrep; ac_prog_name=$2
+if test "${ac_cv_path_EGREP+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_path_EGREP_found=false
+# Loop through the user's path and test for each of PROGNAME-LIST
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in egrep; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+ { test -f "$ac_path_EGREP" && $as_executable_p "$ac_path_EGREP"; } || continue
+ # Check for GNU ac_path_EGREP and select it if it is found.
+ # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+ ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+ ac_count=0
+ echo $ECHO_N "0123456789$ECHO_C" >"conftest.in"
+ while :
+ do
+ cat "conftest.in" "conftest.in" >"conftest.tmp"
+ mv "conftest.tmp" "conftest.in"
+ cp "conftest.in" "conftest.nl"
+ echo 'EGREP' >> "conftest.nl"
+ "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+ diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+ ac_count=`expr $ac_count + 1`
+ if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+ # Best one so far, save it but keep looking for a better one
+ ac_cv_path_EGREP="$ac_path_EGREP"
+ ac_path_EGREP_max=$ac_count
+ fi
+ # 10*(2^10) chars as input seems more than enough
+ test $ac_count -gt 10 && break
+ done
+ rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+
+ $ac_path_EGREP_found && break 3
+ done
+done
+
+done
+IFS=$as_save_IFS
+
+
+fi
+
+EGREP="$ac_cv_path_EGREP"
+if test -z "$EGREP"; then
+ { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5
+echo "$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;}
+ { (exit 1); exit 1; }; }
+fi
+
+else
+ ac_cv_path_EGREP=$EGREP
+fi
+
+
+ fi
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5
+echo "${ECHO_T}$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ echo "$as_me:$LINENO: checking for ANSI C header files" >&5
+echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6; }
+if test "${ac_cv_header_stdc+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_header_stdc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_header_stdc=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+ # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "memchr" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+ $EGREP "free" >/dev/null 2>&1; then
+ :
+else
+ ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+ # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+ if test "$cross_compiling" = yes; then
+ :
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+ (('a' <= (c) && (c) <= 'i') \
+ || ('j' <= (c) && (c) <= 'r') \
+ || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ if (XOR (islower (i), ISLOWER (i))
+ || toupper (i) != TOUPPER (i))
+ return 2;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ :
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+
+
+fi
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5
+echo "${ECHO_T}$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+cat >>confdefs.h <<\_ACEOF
+#define STDC_HEADERS 1
+_ACEOF
+
+fi
+
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+
+
+
+
+
+
+
+
+
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+ inttypes.h stdint.h unistd.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+{ echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_Header=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ eval "$as_ac_Header=no"
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+ac_res=`eval echo '${'$as_ac_Header'}'`
+ { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+{ echo "$as_me:$LINENO: checking for short" >&5
+echo $ECHO_N "checking for short... $ECHO_C" >&6; }
+if test "${ac_cv_type_short+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+typedef short ac__type_new_;
+int
+main ()
+{
+if ((ac__type_new_ *) 0)
+ return 0;
+if (sizeof (ac__type_new_))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_short=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_type_short=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_type_short" >&5
+echo "${ECHO_T}$ac_cv_type_short" >&6; }
+
+{ echo "$as_me:$LINENO: checking size of short" >&5
+echo $ECHO_N "checking size of short... $ECHO_C" >&6; }
+if test "${ac_cv_sizeof_short+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_short" = yes; then
+ # The cast to long int works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef short ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef short ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef short ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef short ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo= ac_hi=
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef short ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_short=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (short)
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (short)
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; } ;;
+esac
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef short ac__type_sizeof_;
+static long int longval () { return (long int) (sizeof (ac__type_sizeof_)); }
+static unsigned long int ulongval () { return (long int) (sizeof (ac__type_sizeof_)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ return 1;
+ if (((long int) (sizeof (ac__type_sizeof_))) < 0)
+ {
+ long int i = longval ();
+ if (i != ((long int) (sizeof (ac__type_sizeof_))))
+ return 1;
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long int i = ulongval ();
+ if (i != ((long int) (sizeof (ac__type_sizeof_))))
+ return 1;
+ fprintf (f, "%lu\n", i);
+ }
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_short=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (short)
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (short)
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; }
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_short=0
+fi
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_sizeof_short" >&5
+echo "${ECHO_T}$ac_cv_sizeof_short" >&6; }
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_SHORT $ac_cv_sizeof_short
+_ACEOF
+
+
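+# Same AC_CHECK_SIZEOF probe as above, this time for int.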
+{ echo "$as_me:$LINENO: checking for int" >&5
+echo $ECHO_N "checking for int... $ECHO_C" >&6; }
+if test "${ac_cv_type_int+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+typedef int ac__type_new_;
+int
+main ()
+{
+if ((ac__type_new_ *) 0)
+ return 0;
+if (sizeof (ac__type_new_))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_int=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_type_int=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_type_int" >&5
+echo "${ECHO_T}$ac_cv_type_int" >&6; }
+
+{ echo "$as_me:$LINENO: checking size of int" >&5
+echo $ECHO_N "checking size of int... $ECHO_C" >&6; }
+if test "${ac_cv_sizeof_int+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_int" = yes; then
+ # The cast to long int works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef int ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef int ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef int ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef int ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo= ac_hi=
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef int ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_int=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (int)
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (int)
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; } ;;
+esac
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef int ac__type_sizeof_;
+static long int longval () { return (long int) (sizeof (ac__type_sizeof_)); }
+static unsigned long int ulongval () { return (long int) (sizeof (ac__type_sizeof_)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ return 1;
+ if (((long int) (sizeof (ac__type_sizeof_))) < 0)
+ {
+ long int i = longval ();
+ if (i != ((long int) (sizeof (ac__type_sizeof_))))
+ return 1;
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long int i = ulongval ();
+ if (i != ((long int) (sizeof (ac__type_sizeof_))))
+ return 1;
+ fprintf (f, "%lu\n", i);
+ }
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_int=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (int)
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (int)
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; }
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_int=0
+fi
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_sizeof_int" >&5
+echo "${ECHO_T}$ac_cv_sizeof_int" >&6; }
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_INT $ac_cv_sizeof_int
+_ACEOF
+
+
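+# Same AC_CHECK_SIZEOF probe again, for long.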
+{ echo "$as_me:$LINENO: checking for long" >&5
+echo $ECHO_N "checking for long... $ECHO_C" >&6; }
+if test "${ac_cv_type_long+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+typedef long ac__type_new_;
+int
+main ()
+{
+if ((ac__type_new_ *) 0)
+ return 0;
+if (sizeof (ac__type_new_))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_type_long=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_type_long=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_type_long" >&5
+echo "${ECHO_T}$ac_cv_type_long" >&6; }
+
+{ echo "$as_me:$LINENO: checking size of long" >&5
+echo $ECHO_N "checking size of long... $ECHO_C" >&6; }
+if test "${ac_cv_sizeof_long+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ if test "$ac_cv_type_long" = yes; then
+ # The cast to long int works around a bug in the HP C Compiler
+ # version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+ # declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+ # This bug is HP SR number 8606223364.
+ if test "$cross_compiling" = yes; then
+ # Depending upon the size, compute the lo and hi bounds.
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef long ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=0 ac_mid=0
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef long ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo=`expr $ac_mid + 1`
+ if test $ac_lo -le $ac_mid; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid + 1`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef long ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) < 0)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=-1 ac_mid=-1
+ while :; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef long ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_lo=$ac_mid; break
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_hi=`expr '(' $ac_mid ')' - 1`
+ if test $ac_mid -le $ac_hi; then
+ ac_lo= ac_hi=
+ break
+ fi
+ ac_mid=`expr 2 '*' $ac_mid`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo= ac_hi=
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+ ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo`
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef long ac__type_sizeof_;
+int
+main ()
+{
+static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)];
+test_array [0] = 0
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_hi=$ac_mid
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_lo=`expr '(' $ac_mid ')' + 1`
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in
+?*) ac_cv_sizeof_long=$ac_lo;;
+'') { { echo "$as_me:$LINENO: error: cannot compute sizeof (long)
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (long)
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; } ;;
+esac
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+ typedef long ac__type_sizeof_;
+static long int longval () { return (long int) (sizeof (ac__type_sizeof_)); }
+static unsigned long int ulongval () { return (long int) (sizeof (ac__type_sizeof_)); }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+ FILE *f = fopen ("conftest.val", "w");
+ if (! f)
+ return 1;
+ if (((long int) (sizeof (ac__type_sizeof_))) < 0)
+ {
+ long int i = longval ();
+ if (i != ((long int) (sizeof (ac__type_sizeof_))))
+ return 1;
+ fprintf (f, "%ld\n", i);
+ }
+ else
+ {
+ unsigned long int i = ulongval ();
+ if (i != ((long int) (sizeof (ac__type_sizeof_))))
+ return 1;
+ fprintf (f, "%lu\n", i);
+ }
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_link") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_sizeof_long=`cat conftest.val`
+else
+ echo "$as_me: program exited with status $ac_status" >&5
+echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+( exit $ac_status )
+{ { echo "$as_me:$LINENO: error: cannot compute sizeof (long)
+See \`config.log' for more details." >&5
+echo "$as_me: error: cannot compute sizeof (long)
+See \`config.log' for more details." >&2;}
+ { (exit 77); exit 77; }; }
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f conftest.val
+else
+ ac_cv_sizeof_long=0
+fi
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_sizeof_long" >&5
+echo "${ECHO_T}$ac_cv_sizeof_long" >&6; }
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_LONG $ac_cv_sizeof_long
+_ACEOF
+
+
+
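+# Expansion of AC_C_CONST: compile a program exercising const constructs that
+# various pre-ANSI compilers rejected; if compilation fails, `const' is
+# defined away in confdefs.h.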
+{ echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5
+echo $ECHO_N "checking for an ANSI C-conforming const... $ECHO_C" >&6; }
+if test "${ac_cv_c_const+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+
+int
+main ()
+{
+/* FIXME: Include the comments suggested by Paul. */
+#ifndef __cplusplus
+ /* Ultrix mips cc rejects this. */
+ typedef int charset[2];
+ const charset x;
+ /* SunOS 4.1.1 cc rejects this. */
+ char const *const *ccp;
+ char **p;
+ /* NEC SVR4.0.2 mips cc rejects this. */
+ struct point {int x, y;};
+ static struct point const zero = {0,0};
+ /* AIX XL C 1.02.0.0 rejects this.
+ It does not let you subtract one const X* pointer from another in
+ an arm of an if-expression whose if-part is not a constant
+ expression */
+ const char *g = "string";
+ ccp = &g + (g ? g-g : 0);
+ /* HPUX 7.0 cc rejects these. */
+ ++ccp;
+ p = (char**) ccp;
+ ccp = (char const *const *) p;
+ { /* SCO 3.2v4 cc rejects this. */
+ char *t;
+ char const *s = 0 ? (char *) 0 : (char const *) 0;
+
+ *t++ = 0;
+ if (s) return 0;
+ }
+ { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */
+ int x[] = {25, 17};
+ const int *foo = &x[0];
+ ++foo;
+ }
+ { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */
+ typedef const int *iptr;
+ iptr p = 0;
+ ++p;
+ }
+ { /* AIX XL C 1.02.0.0 rejects this saying
+ "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
+ struct s { int j; const int *ap[3]; };
+ struct s *b; b->j = 5;
+ }
+ { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
+ const int foo = 10;
+ if (!foo) return 0;
+ }
+ return !x[0] && !zero.x;
+#endif
+
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_c_const=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_cv_c_const=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_c_const" >&5
+echo "${ECHO_T}$ac_cv_c_const" >&6; }
+if test $ac_cv_c_const = no; then
+
+cat >>confdefs.h <<\_ACEOF
+#define const
+_ACEOF
+
+fi
+
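+# Expansion of AC_C_INLINE: try each inline keyword in turn and remember the
+# first one the compiler accepts; a fallback #define is emitted below if the
+# plain `inline' keyword is not usable.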
+{ echo "$as_me:$LINENO: checking for inline" >&5
+echo $ECHO_N "checking for inline... $ECHO_C" >&6; }
+if test "${ac_cv_c_inline+set}" = set; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ ac_cv_c_inline=no
+for ac_kw in inline __inline__ __inline; do
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#ifndef __cplusplus
+typedef int foo_t;
+static $ac_kw foo_t static_foo () {return 0; }
+$ac_kw foo_t foo () {return 0; }
+#endif
+
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_cv_c_inline=$ac_kw
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ test "$ac_cv_c_inline" != no && break
+done
+
+fi
+{ echo "$as_me:$LINENO: result: $ac_cv_c_inline" >&5
+echo "${ECHO_T}$ac_cv_c_inline" >&6; }
+
+
+case $ac_cv_c_inline in
+ inline | yes) ;;
+ *)
+ case $ac_cv_c_inline in
+ no) ac_val=;;
+ *) ac_val=$ac_cv_c_inline;;
+ esac
+ cat >>confdefs.h <<_ACEOF
+#ifndef __cplusplus
+#define inline $ac_val
+#endif
+_ACEOF
+ ;;
+esac
+
+
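+# Expansion of AC_CHECK_HEADERS(getopt.h): the header is tested both by
+# compiling against it and by preprocessing it alone; if the two results
+# disagree, only warnings are issued.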
+for ac_header in getopt.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ { echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+fi
+ac_res=`eval echo '${'$as_ac_Header'}'`
+ { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ echo "$as_me:$LINENO: checking $ac_header usability" >&5
+echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_header_compiler=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_compiler=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+echo "${ECHO_T}$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ echo "$as_me:$LINENO: checking $ac_header presence" >&5
+echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <$ac_header>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ ac_cpp_err=$ac_cpp_err$ac_c_werror_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ ac_header_preproc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+
+rm -f conftest.err conftest.$ac_ext
+{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+echo "${ECHO_T}$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
+ yes:no: )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
+ ac_header_preproc=yes
+ ;;
+ no:yes:* )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5
+echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
+echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5
+echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
+echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
+
+ ;;
+esac
+{ echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ eval "$as_ac_Header=\$ac_header_preproc"
+fi
+ac_res=`eval echo '${'$as_ac_Header'}'`
+ { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
+
+fi
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
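+# Expansion of AC_CHECK_FUNCS(getopt_long): link a program against a dummy
+# `char getopt_long ();' prototype so the test cannot be satisfied by a
+# compiler builtin, and fail deliberately if glibc only provides a __stub
+# alias.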
+for ac_func in getopt_long
+do
+as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
+{ echo "$as_me:$LINENO: checking for $ac_func" >&5
+echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6; }
+if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+/* Define $ac_func to an innocuous variant, in case <limits.h> declares $ac_func.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define $ac_func innocuous_$ac_func
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $ac_func (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $ac_func
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $ac_func ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_$ac_func || defined __stub___$ac_func
+choke me
+#endif
+
+int
+main ()
+{
+return $ac_func ();
+ ;
+ return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_link") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest$ac_exeext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ eval "$as_ac_var=yes"
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ eval "$as_ac_var=no"
+fi
+
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+ac_res=`eval echo '${'$as_ac_var'}'`
+ { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
+if test `eval echo '${'$as_ac_var'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+
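+# Aggressive optimization flags are added only when the compiler was
+# identified as gcc.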
+if test "$GCC" = yes; then
+ CFLAGS="$CFLAGS -O3 -funroll-all-loops"
+else
+ { echo "$as_me:$LINENO: result: no gcc" >&5
+echo "${ECHO_T}no gcc" >&6; }
+fi
+
+ac_config_files="$ac_config_files Makefile"
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems. If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+ for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+ eval ac_val=\$$ac_var
+ case $ac_val in #(
+ *${as_nl}*)
+ case $ac_var in #(
+ *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5
+echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
+ esac
+ case $ac_var in #(
+ _ | IFS | as_nl) ;; #(
+ *) $as_unset $ac_var ;;
+ esac ;;
+ esac
+ done
+
+ (set) 2>&1 |
+ case $as_nl`(ac_space=' '; set) 2>&1` in #(
+ *${as_nl}ac_space=\ *)
+ # `set' does not quote correctly, so add quotes (double-quote
+ # substitution turns \\\\ into \\, and sed turns \\ into \).
+ sed -n \
+ "s/'/'\\\\''/g;
+ s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+ ;; #(
+ *)
+ # `set' quotes correctly as required by POSIX, so do not add quotes.
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+ ;;
+ esac |
+ sort
+) |
+ sed '
+ /^ac_cv_env_/b end
+ t clear
+ :clear
+ s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+ t end
+ s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+ :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+ if test -w "$cache_file"; then
+ test "x$cache_file" != "x/dev/null" &&
+ { echo "$as_me:$LINENO: updating cache $cache_file" >&5
+echo "$as_me: updating cache $cache_file" >&6;}
+ cat confcache >$cache_file
+ else
+ { echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5
+echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+ fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+ # 1. Remove the extension, and $U if already installed.
+ ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+ ac_i=`echo "$ac_i" | sed "$ac_script"`
+ # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR
+ # will be set to the directory where LIBOBJS objects are built.
+ ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+ ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+
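+# Generate config.status, a standalone script that can re-create the current
+# configuration; the first here-document is expanded now, the backslash-quoted
+# one is copied verbatim.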
+: ${CONFIG_STATUS=./config.status}
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5
+echo "$as_me: creating $CONFIG_STATUS" >&6;}
+cat >$CONFIG_STATUS <<_ACEOF
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+SHELL=\${CONFIG_SHELL-$SHELL}
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+## --------------------- ##
+## M4sh Initialization. ##
+## --------------------- ##
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+ emulate sh
+ NULLCMD=:
+  # Zsh 3.x and 4.x perform word splitting on ${1+"$@"}, which
+ # is contrary to our usage. Disable this feature.
+ alias -g '${1+"$@"}'='"$@"'
+ setopt NO_GLOB_SUBST
+else
+ case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+
+# PATH needs CR
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+ echo "#! /bin/sh" >conf$$.sh
+ echo "exit 0" >>conf$$.sh
+ chmod +x conf$$.sh
+ if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
+ PATH_SEPARATOR=';'
+ else
+ PATH_SEPARATOR=:
+ fi
+ rm -f conf$$.sh
+fi
+
+# Support unset when possible.
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+ as_unset=unset
+else
+ as_unset=false
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order. Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to an empty value.)
+as_nl='
+'
+IFS=" "" $as_nl"
+
+# Find who we are. Look in the path if we contain no directory separator.
+case $0 in
+ *[\\/]* ) as_myself=$0 ;;
+ *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+done
+IFS=$as_save_IFS
+
+ ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+ as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+ echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+ { (exit 1); exit 1; }
+fi
+
+# Work around bugs in pre-3.0 UWIN ksh.
+for as_var in ENV MAIL MAILPATH
+do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+for as_var in \
+ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
+ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
+ LC_TELEPHONE LC_TIME
+do
+ if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
+ eval $as_var=C; export $as_var
+ else
+ ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
+ fi
+done
+
+# Required to use basename.
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+ as_basename=basename
+else
+ as_basename=false
+fi
+
+
+# Name of the executable.
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ X"$0" : 'X\(//\)$' \| \
+ X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+echo X/"$0" |
+ sed '/^.*\/\([^/][^/]*\)\/*$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\/\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+
+# CDPATH.
+$as_unset CDPATH
+
+
+
+ as_lineno_1=$LINENO
+ as_lineno_2=$LINENO
+ test "x$as_lineno_1" != "x$as_lineno_2" &&
+ test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
+
+ # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
+ # uniformly replaced by the line number. The first 'sed' inserts a
+ # line-number line after each line using $LINENO; the second 'sed'
+ # does the real work. The second script uses 'N' to pair each
+ # line-number line with the line containing $LINENO, and appends
+ # trailing '-' during substitution so that $LINENO is not a special
+ # case at line end.
+ # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
+ # scripts with optimization help from Paolo Bonzini. Blame Lee
+ # E. McMahon (1931-1989) for sed's syntax. :-)
+ sed -n '
+ p
+ /[$]LINENO/=
+ ' <$as_myself |
+ sed '
+ s/[$]LINENO.*/&-/
+ t lineno
+ b
+ :lineno
+ N
+ :loop
+ s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+ t loop
+ s/-\n.*//
+ ' >$as_me.lineno &&
+ chmod +x "$as_me.lineno" ||
+ { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
+ { (exit 1); exit 1; }; }
+
+  # Don't try to exec as it changes $[0], causing all sorts of problems
+ # (the dirname of $[0] is not the place where we might find the
+ # original and so on. Autoconf is especially sensitive to this).
+ . "./$as_me.lineno"
+ # Exit status is that of the last command.
+ exit
+}
+
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+ as_dirname=dirname
+else
+ as_dirname=false
+fi
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in
+-n*)
+ case `echo 'x\c'` in
+ *c*) ECHO_T=' ';; # ECHO_T is single tab character.
+ *) ECHO_C='\c';;
+ esac;;
+*)
+ ECHO_N='-n';;
+esac
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+ test "X`expr 00001 : '.*\(...\)'`" = X001; then
+ as_expr=expr
+else
+ as_expr=false
+fi
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+ rm -f conf$$.dir/conf$$.file
+else
+ rm -f conf$$.dir
+ mkdir conf$$.dir
+fi
+echo >conf$$.file
+if ln -s conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s='ln -s'
+ # ... but there are two gotchas:
+ # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+ # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+ # In both cases, we have to default to `cp -p'.
+ ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+ as_ln_s='cp -p'
+elif ln conf$$.file conf$$ 2>/dev/null; then
+ as_ln_s=ln
+else
+ as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+ as_mkdir_p=:
+else
+ test -d ./-p && rmdir ./-p
+ as_mkdir_p=false
+fi
+
+# Find out whether ``test -x'' works. Don't use a zero-byte file, as
+# systems may use methods other than mode bits to determine executability.
+cat >conf$$.file <<_ASEOF
+#! /bin/sh
+exit 0
+_ASEOF
+chmod +x conf$$.file
+if test -x conf$$.file >/dev/null 2>&1; then
+ as_executable_p="test -x"
+else
+ as_executable_p=:
+fi
+rm -f conf$$.file
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+
+# Save the log message, to keep $[0] and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by $as_me, which was
+generated by GNU Autoconf 2.60. Invocation command line was
+
+ CONFIG_FILES = $CONFIG_FILES
+ CONFIG_HEADERS = $CONFIG_HEADERS
+ CONFIG_LINKS = $CONFIG_LINKS
+ CONFIG_COMMANDS = $CONFIG_COMMANDS
+ $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<_ACEOF
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+ac_cs_usage="\
+\`$as_me' instantiates files from templates according to the
+current configuration.
+
+Usage: $0 [OPTIONS] [FILE]...
+
+ -h, --help print this help, then exit
+ -V, --version print version number, then exit
+ -q, --quiet do not print progress messages
+ -d, --debug don't remove temporary files
+ --recheck update $as_me by reconfiguring in the same conditions
+ --file=FILE[:TEMPLATE]
+ instantiate the configuration file FILE
+ --header=FILE[:TEMPLATE]
+ instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Report bugs to <bug-autoconf@gnu.org>."
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+ac_cs_version="\\
+config.status
+configured by $0, generated by GNU Autoconf 2.60,
+ with options \\"`echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\"
+
+Copyright (C) 2006 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+# If no files are specified by the user, then we need to provide default
+# values.  But we need to know whether files were specified by the user.
+ac_need_defaults=:
+while test $# != 0
+do
+ case $1 in
+ --*=*)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+ ac_shift=:
+ ;;
+ *)
+ ac_option=$1
+ ac_optarg=$2
+ ac_shift=shift
+ ;;
+ esac
+
+ case $ac_option in
+ # Handling of the options.
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+ ac_cs_recheck=: ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+ echo "$ac_cs_version"; exit ;;
+ --debug | --debu | --deb | --de | --d | -d )
+ debug=: ;;
+ --file | --fil | --fi | --f )
+ $ac_shift
+ CONFIG_FILES="$CONFIG_FILES $ac_optarg"
+ ac_need_defaults=false;;
+ --header | --heade | --head | --hea )
+ $ac_shift
+ CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg"
+ ac_need_defaults=false;;
+ --he | --h)
+ # Conflict between --help and --header
+ { echo "$as_me: error: ambiguous option: $1
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; };;
+ --help | --hel | -h )
+ echo "$ac_cs_usage"; exit ;;
+ -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+ | -silent | --silent | --silen | --sile | --sil | --si | --s)
+ ac_cs_silent=: ;;
+
+ # This is an error.
+ -*) { echo "$as_me: error: unrecognized option: $1
+Try \`$0 --help' for more information." >&2
+ { (exit 1); exit 1; }; } ;;
+
+ *) ac_config_targets="$ac_config_targets $1"
+ ac_need_defaults=false ;;
+
+ esac
+ shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+ exec 6>/dev/null
+ ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+if \$ac_cs_recheck; then
+ echo "running CONFIG_SHELL=$SHELL $SHELL $0 "$ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6
+ CONFIG_SHELL=$SHELL
+ export CONFIG_SHELL
+ exec $SHELL "$0"$ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+exec 5>>config.log
+{
+ echo
+ sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+ echo "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+ case $ac_config_target in
+ "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
+ "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+
+ *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
+echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used. Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+ test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+ test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+fi
+
+# Have a temporary directory for convenience. Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+ tmp=
+ trap 'exit_status=$?
+ { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
+' 0
+ trap '{ (exit 1); exit 1; }' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+ tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+ test -n "$tmp" && test -d "$tmp"
+} ||
+{
+ tmp=./conf$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+} ||
+{
+ echo "$me: cannot create a temporary directory in ." >&2
+ { (exit 1); exit 1; }
+}
+
+#
+# Set up the sed scripts for CONFIG_FILES section.
+#
+
+# No need to generate the scripts if there are no CONFIG_FILES.
+# This happens for instance when ./config.status config.h
+if test -n "$CONFIG_FILES"; then
+
+_ACEOF
+
+
+
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+ cat >conf$$subs.sed <<_ACEOF
+SHELL!$SHELL$ac_delim
+PATH_SEPARATOR!$PATH_SEPARATOR$ac_delim
+PACKAGE_NAME!$PACKAGE_NAME$ac_delim
+PACKAGE_TARNAME!$PACKAGE_TARNAME$ac_delim
+PACKAGE_VERSION!$PACKAGE_VERSION$ac_delim
+PACKAGE_STRING!$PACKAGE_STRING$ac_delim
+PACKAGE_BUGREPORT!$PACKAGE_BUGREPORT$ac_delim
+exec_prefix!$exec_prefix$ac_delim
+prefix!$prefix$ac_delim
+program_transform_name!$program_transform_name$ac_delim
+bindir!$bindir$ac_delim
+sbindir!$sbindir$ac_delim
+libexecdir!$libexecdir$ac_delim
+datarootdir!$datarootdir$ac_delim
+datadir!$datadir$ac_delim
+sysconfdir!$sysconfdir$ac_delim
+sharedstatedir!$sharedstatedir$ac_delim
+localstatedir!$localstatedir$ac_delim
+includedir!$includedir$ac_delim
+oldincludedir!$oldincludedir$ac_delim
+docdir!$docdir$ac_delim
+infodir!$infodir$ac_delim
+htmldir!$htmldir$ac_delim
+dvidir!$dvidir$ac_delim
+pdfdir!$pdfdir$ac_delim
+psdir!$psdir$ac_delim
+libdir!$libdir$ac_delim
+localedir!$localedir$ac_delim
+mandir!$mandir$ac_delim
+DEFS!$DEFS$ac_delim
+ECHO_C!$ECHO_C$ac_delim
+ECHO_N!$ECHO_N$ac_delim
+ECHO_T!$ECHO_T$ac_delim
+LIBS!$LIBS$ac_delim
+build_alias!$build_alias$ac_delim
+host_alias!$host_alias$ac_delim
+target_alias!$target_alias$ac_delim
+CC!$CC$ac_delim
+CFLAGS!$CFLAGS$ac_delim
+LDFLAGS!$LDFLAGS$ac_delim
+CPPFLAGS!$CPPFLAGS$ac_delim
+ac_ct_CC!$ac_ct_CC$ac_delim
+EXEEXT!$EXEEXT$ac_delim
+OBJEXT!$OBJEXT$ac_delim
+RANLIB!$RANLIB$ac_delim
+INSTALL_PROGRAM!$INSTALL_PROGRAM$ac_delim
+INSTALL_SCRIPT!$INSTALL_SCRIPT$ac_delim
+INSTALL_DATA!$INSTALL_DATA$ac_delim
+CPP!$CPP$ac_delim
+GREP!$GREP$ac_delim
+EGREP!$EGREP$ac_delim
+LIBOBJS!$LIBOBJS$ac_delim
+LTLIBOBJS!$LTLIBOBJS$ac_delim
+_ACEOF
+
+ if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 53; then
+ break
+ elif $ac_last_try; then
+ { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
+echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
+ { (exit 1); exit 1; }; }
+ else
+ ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+ fi
+done
+
+ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed`
+if test -n "$ac_eof"; then
+ ac_eof=`echo "$ac_eof" | sort -nru | sed 1q`
+ ac_eof=`expr $ac_eof + 1`
+fi
+
+cat >>$CONFIG_STATUS <<_ACEOF
+cat >"\$tmp/subs-1.sed" <<\CEOF$ac_eof
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b end
+_ACEOF
+sed '
+s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g
+s/^/s,@/; s/!/@,|#_!!_#|/
+:n
+t n
+s/'"$ac_delim"'$/,g/; t
+s/$/\\/; p
+N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n
+' >>$CONFIG_STATUS <conf$$subs.sed
+rm -f conf$$subs.sed
+cat >>$CONFIG_STATUS <<_ACEOF
+:end
+s/|#_!!_#|//g
+CEOF$ac_eof
+_ACEOF
+
+
+# VPATH may cause trouble with some makes, so we remove $(srcdir),
+# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+ ac_vpsub='/^[ ]*VPATH[ ]*=/{
+s/:*\$(srcdir):*/:/
+s/:*\${srcdir}:*/:/
+s/:*@srcdir@:*/:/
+s/^\([^=]*=[ ]*\):*/\1/
+s/:*$//
+s/^[^=]*=[ ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+fi # test -n "$CONFIG_FILES"
+
+
+for ac_tag in :F $CONFIG_FILES :H $CONFIG_HEADERS
+do
+ case $ac_tag in
+ :[FHLC]) ac_mode=$ac_tag; continue;;
+ esac
+ case $ac_mode$ac_tag in
+ :[FHL]*:*);;
+ :L* | :C*:*) { { echo "$as_me:$LINENO: error: Invalid tag $ac_tag." >&5
+echo "$as_me: error: Invalid tag $ac_tag." >&2;}
+ { (exit 1); exit 1; }; };;
+ :[FH]-) ac_tag=-:-;;
+ :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+ esac
+ ac_save_IFS=$IFS
+ IFS=:
+ set x $ac_tag
+ IFS=$ac_save_IFS
+ shift
+ ac_file=$1
+ shift
+
+ case $ac_mode in
+ :L) ac_source=$1;;
+ :[FH])
+ ac_file_inputs=
+ for ac_f
+ do
+ case $ac_f in
+ -) ac_f="$tmp/stdin";;
+ *) # Look for the file first in the build tree, then in the source tree
+ # (if the path is not absolute). The absolute path cannot be DOS-style,
+ # because $ac_f cannot contain `:'.
+ test -f "$ac_f" ||
+ case $ac_f in
+ [\\/$]*) false;;
+ *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+ esac ||
+ { { echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5
+echo "$as_me: error: cannot find input file: $ac_f" >&2;}
+ { (exit 1); exit 1; }; };;
+ esac
+ ac_file_inputs="$ac_file_inputs $ac_f"
+ done
+
+ # Let's still pretend it is `configure' which instantiates (i.e., don't
+  # use $as_me); people would be surprised to read:
+ # /* config.h. Generated by config.status. */
+ configure_input="Generated from "`IFS=:
+ echo $* | sed 's|^[^:]*/||;s|:[^:]*/|, |g'`" by configure."
+ if test x"$ac_file" != x-; then
+ configure_input="$ac_file. $configure_input"
+ { echo "$as_me:$LINENO: creating $ac_file" >&5
+echo "$as_me: creating $ac_file" >&6;}
+ fi
+
+ case $ac_tag in
+ *:-:* | *:-) cat >"$tmp/stdin";;
+ esac
+ ;;
+ esac
+
+ ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$ac_file" : 'X\(//\)[^/]' \| \
+ X"$ac_file" : 'X\(//\)$' \| \
+ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+echo X"$ac_file" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ { as_dir="$ac_dir"
+ case $as_dir in #(
+ -*) as_dir=./$as_dir;;
+ esac
+ test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
+ as_dirs=
+ while :; do
+ case $as_dir in #(
+ *\'*) as_qdir=`echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #(
+ *) as_qdir=$as_dir;;
+ esac
+ as_dirs="'$as_qdir' $as_dirs"
+ as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$as_dir" : 'X\(//\)[^/]' \| \
+ X"$as_dir" : 'X\(//\)$' \| \
+ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+echo X"$as_dir" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'`
+ test -d "$as_dir" && break
+ done
+ test -z "$as_dirs" || eval "mkdir $as_dirs"
+ } || test -d "$as_dir" || { { echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
+echo "$as_me: error: cannot create directory $as_dir" >&2;}
+ { (exit 1); exit 1; }; }; }
+ ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+ ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
+ # A ".." for each directory in $ac_dir_suffix.
+ ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'`
+ case $ac_top_builddir_sub in
+ "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *) ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+ esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+ .) # We are building in place.
+ ac_srcdir=.
+ ac_top_srcdir=$ac_top_builddir_sub
+ ac_abs_top_srcdir=$ac_pwd ;;
+ [\\/]* | ?:[\\/]* ) # Absolute name.
+ ac_srcdir=$srcdir$ac_dir_suffix;
+ ac_top_srcdir=$srcdir
+ ac_abs_top_srcdir=$srcdir ;;
+ *) # Relative name.
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+ ac_top_srcdir=$ac_top_build_prefix$srcdir
+ ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+ case $ac_mode in
+ :F)
+ #
+ # CONFIG_FILE
+ #
+
+ case $INSTALL in
+ [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+ *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+ esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+
+case `sed -n '/datarootdir/ {
+ p
+ q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p
+' $ac_file_inputs` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+ { echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF
+ ac_datarootdir_hack='
+ s&@datadir@&$datadir&g
+ s&@docdir@&$docdir&g
+ s&@infodir@&$infodir&g
+ s&@localedir@&$localedir&g
+ s&@mandir@&$mandir&g
+ s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF
+ sed "$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s&@configure_input@&$configure_input&;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+$ac_datarootdir_hack
+" $ac_file_inputs | sed -f "$tmp/subs-1.sed" >$tmp/out
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
+ { echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&5
+echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined. Please make sure it is defined." >&2;}
+
+ rm -f "$tmp/stdin"
+ case $ac_file in
+ -) cat "$tmp/out"; rm -f "$tmp/out";;
+ *) rm -f "$ac_file"; mv "$tmp/out" $ac_file;;
+ esac
+ ;;
+ :H)
+ #
+ # CONFIG_HEADER
+ #
+_ACEOF
+
+# Transform confdefs.h into a sed script `conftest.defines' that
+# substitutes the proper values into config.h.in to produce config.h.
+rm -f conftest.defines conftest.tail
+# First, append a space to every undef/define line, to ease matching.
+echo 's/$/ /' >conftest.defines
+# Then, protect against being on the right side of a sed subst, or in
+# an unquoted here document, in config.status. If some macros were
+# called several times there might be several #defines for the same
+# symbol, which is useless. But do not sort them, since the last
+# AC_DEFINE must be honored.
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+# These sed commands are passed to sed as "A NAME B PARAMS C VALUE D", where
+# NAME is the cpp macro being defined, VALUE is the value it is being given.
+# PARAMS is the parameter list in the macro definition--in most cases, it's
+# just an empty string.
+ac_dA='s,^\\([ #]*\\)[^ ]*\\([ ]*'
+ac_dB='\\)[ (].*,\\1define\\2'
+ac_dC=' '
+ac_dD=' ,'
+
+uniq confdefs.h |
+ sed -n '
+ t rset
+ :rset
+ s/^[ ]*#[ ]*define[ ][ ]*//
+ t ok
+ d
+ :ok
+ s/[\\&,]/\\&/g
+ s/^\('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/ '"$ac_dA"'\1'"$ac_dB"'\2'"${ac_dC}"'\3'"$ac_dD"'/p
+ s/^\('"$ac_word_re"'\)[ ]*\(.*\)/'"$ac_dA"'\1'"$ac_dB$ac_dC"'\2'"$ac_dD"'/p
+ ' >>conftest.defines
+
+# Remove the space that was appended to ease matching.
+# Then replace #undef with comments. This is necessary, for
+# example, in the case of _POSIX_SOURCE, which is predefined and required
+# on some systems where configure will not decide to define it.
+# (The regexp can be short, since the line contains either #define or #undef.)
+echo 's/ $//
+s,^[ #]*u.*,/* & */,' >>conftest.defines
+
+# Break up conftest.defines:
+ac_max_sed_lines=50
+
+# First sed command is: sed -f defines.sed $ac_file_inputs >"$tmp/out1"
+# Second one is: sed -f defines.sed "$tmp/out1" >"$tmp/out2"
+# Third one will be: sed -f defines.sed "$tmp/out2" >"$tmp/out1"
+# et cetera.
+ac_in='$ac_file_inputs'
+ac_out='"$tmp/out1"'
+ac_nxt='"$tmp/out2"'
+
+while :
+do
+ # Write a here document:
+ cat >>$CONFIG_STATUS <<_ACEOF
+ # First, check the format of the line:
+ cat >"\$tmp/defines.sed" <<\\CEOF
+/^[ ]*#[ ]*undef[ ][ ]*$ac_word_re[ ]*\$/b def
+/^[ ]*#[ ]*define[ ][ ]*$ac_word_re[( ]/b def
+b
+:def
+_ACEOF
+ sed ${ac_max_sed_lines}q conftest.defines >>$CONFIG_STATUS
+ echo 'CEOF
+ sed -f "$tmp/defines.sed"' "$ac_in >$ac_out" >>$CONFIG_STATUS
+ ac_in=$ac_out; ac_out=$ac_nxt; ac_nxt=$ac_in
+ sed 1,${ac_max_sed_lines}d conftest.defines >conftest.tail
+ grep . conftest.tail >/dev/null || break
+ rm -f conftest.defines
+ mv conftest.tail conftest.defines
+done
+rm -f conftest.defines conftest.tail
+
+echo "ac_result=$ac_in" >>$CONFIG_STATUS
+cat >>$CONFIG_STATUS <<\_ACEOF
+ if test x"$ac_file" != x-; then
+ echo "/* $configure_input */" >"$tmp/config.h"
+ cat "$ac_result" >>"$tmp/config.h"
+ if diff $ac_file "$tmp/config.h" >/dev/null 2>&1; then
+ { echo "$as_me:$LINENO: $ac_file is unchanged" >&5
+echo "$as_me: $ac_file is unchanged" >&6;}
+ else
+ rm -f $ac_file
+ mv "$tmp/config.h" $ac_file
+ fi
+ else
+ echo "/* $configure_input */"
+ cat "$ac_result"
+ fi
+ rm -f "$tmp/out12"
+ ;;
+
+
+ esac
+
+done # for ac_tag
+
+
+{ (exit 0); exit 0; }
+_ACEOF
+chmod +x $CONFIG_STATUS
+ac_clean_files=$ac_clean_files_save
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded. So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status. When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+ ac_cs_success=:
+ ac_config_status_args=
+ test "$silent" = yes &&
+ ac_config_status_args="$ac_config_status_args --quiet"
+ exec 5>/dev/null
+ $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+ exec 5>>config.log
+ # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+ # would make configure fail if this is the last instruction.
+ $ac_cs_success || { (exit 1); exit 1; }
+fi
+
diff --git a/lib/liblzf-3.5/configure.ac b/lib/liblzf-3.5/configure.ac
new file mode 100644
index 0000000000..58316a01b3
--- /dev/null
+++ b/lib/liblzf-3.5/configure.ac
@@ -0,0 +1,25 @@
+AC_INIT
+AC_CONFIG_SRCDIR([lzfP.h])
+
+AC_CONFIG_HEADER(config.h)
+
+AC_GNU_SOURCE
+AC_SYS_LARGEFILE
+AC_PROG_CC
+AC_PROG_RANLIB
+AC_PROG_INSTALL
+AC_HEADER_STDC
+
+AC_C_CONST
+AC_C_INLINE
+AC_CHECK_HEADERS(getopt.h)
+AC_CHECK_FUNCS(getopt_long)
+
+if test "$GCC" = yes; then
+ CFLAGS="$CFLAGS -O3 -funroll-all-loops"
+else
+ AC_MSG_RESULT(no gcc)
+fi
+
+AC_CONFIG_FILES([Makefile])
+AC_OUTPUT
diff --git a/lib/liblzf-3.5/crc32.h b/lib/liblzf-3.5/crc32.h
new file mode 100644
index 0000000000..cf8f6d4097
--- /dev/null
+++ b/lib/liblzf-3.5/crc32.h
@@ -0,0 +1,65 @@
+#ifndef CRC32_H
+#define CRC32_H
+
+/* crc32 0xdebb20e3 table and supplementary functions. */
+
+static const u32 crc_32_tab[] =
+{
+ 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL,
+ 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL,
+ 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL,
+ 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL,
+ 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL,
+ 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL,
+ 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL,
+ 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL,
+ 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL,
+ 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL,
+ 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL,
+ 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL,
+ 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL,
+ 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL,
+ 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL,
+ 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL,
+ 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL,
+ 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL,
+ 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL,
+ 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL,
+ 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL,
+ 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL,
+ 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL,
+ 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL,
+ 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL,
+ 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL,
+ 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL,
+ 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL,
+ 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL,
+ 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL,
+ 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL,
+ 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL,
+ 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL,
+ 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL,
+ 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL,
+ 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL,
+ 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL,
+ 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL,
+ 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL,
+ 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL,
+ 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL,
+ 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL,
+ 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL,
+ 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL,
+ 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL,
+ 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL,
+ 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL,
+ 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL,
+ 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL,
+ 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL,
+ 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL,
+  0x2d02ef8dUL
+};
+
+#define crc32(crc,byte) (crc_32_tab[(u8)(crc) ^ (u8)(byte)] ^ ((crc) >> 8))
+
+#endif
+
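Editor's note: the crc32 macro above folds a single byte into a running CRC
using the reflected CRC-32 table (0xdebb20e3 names the same polynomial that
the "ZV\2" checksum block of the lzf file format reserves, marked NYI in
lzf.c below).  A minimal, hedged sketch of whole-buffer usage, assuming the
conventional all-ones initial value and final inversion; crc32_buf is a
hypothetical helper compiled next to crc32.h, not a liblzf API:

#include <stddef.h>
#include <stdint.h>

typedef uint8_t  u8;    /* crc32.h expects its includer to supply u8/u32 */
typedef uint32_t u32;
#include "crc32.h"

/* Hypothetical helper: CRC-32 of a buffer via the per-byte macro above. */
static u32 crc32_buf (const u8 *p, size_t len)
{
  u32 crc = 0xffffffffUL;        /* conventional initial value   */
  while (len--)
    crc = crc32 (crc, *p++);     /* fold in one byte             */
  return crc ^ 0xffffffffUL;     /* conventional final inversion */
}
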
diff --git a/lib/liblzf-3.5/cs/CLZF.cs b/lib/liblzf-3.5/cs/CLZF.cs
new file mode 100644
index 0000000000..4e1b5d50b8
--- /dev/null
+++ b/lib/liblzf-3.5/cs/CLZF.cs
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2005 Oren J. Maurice <oymaurice@hazorea.org.il>
+ *
+ * Redistribution and use in source and binary forms, with or without modifica-
+ * tion, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
+ * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
+ * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
+ * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License version 2 (the "GPL"), in which case the
+ * provisions of the GPL are applicable instead of the above. If you wish to
+ * allow the use of your version of this file only under the terms of the
+ * GPL and not to allow others to use your version of this file under the
+ * BSD license, indicate your decision by deleting the provisions above and
+ * replace them with the notice and other provisions required by the GPL. If
+ * you do not delete the provisions above, a recipient may use your version
+ * of this file under either the BSD or the GPL.
+ */
+
+using System;
+
+namespace LZF.NET
+{
+
+ /// <summary>
+ /// Summary description for CLZF.
+ /// </summary>
+ public class CLZF
+ {
+ // CRC32 data & function
+ UInt32 []crc_32_tab = new UInt32[256]
+ {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
+ 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
+ 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
+ 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
+ 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+ 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
+ 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
+ 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
+ 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
+ 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
+ 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
+ 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
+ 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
+ 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+ 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
+ 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
+ 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
+ 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
+ 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
+ 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
+ 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
+ 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
+ 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+ 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
+ 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
+ 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
+ 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
+ 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
+ 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
+ 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
+ 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
+ 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+ 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
+ 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
+ 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
+ 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
+ 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
+ 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
+ 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
+ 0x2d02ef8d
+ };
+
+ public UInt32 crc32(UInt32 OldCRC,byte NewData)
+ {
+ return crc_32_tab[(OldCRC & 0xff) ^ NewData] ^ (OldCRC >> 8);
+ }
+
+
+ /// <summary>
+ /// LZF Compressor
+ /// </summary>
+
+ UInt32 HLOG=14;
+ UInt32 HSIZE=(1<<14);
+
+ /*
+ * don't play with this unless you benchmark!
+ * decompression is not dependent on the hash function
+ * the hashing function might seem strange, just believe me
+ * it works ;)
+ */
+ UInt32 MAX_LIT=(1 << 5);
+ UInt32 MAX_OFF=(1 << 13);
+ UInt32 MAX_REF=((1 << 8) + (1 << 3));
+
+ UInt32 FRST(byte[] Array,UInt32 ptr)
+ {
+ return (UInt32)(((Array[ptr]) << 8) | Array[ptr+1]);
+ }
+
+ UInt32 NEXT(UInt32 v,byte[] Array,UInt32 ptr)
+ {
+ return ((v) << 8) | Array[ptr+2];
+ }
+
+ UInt32 IDX(UInt32 h)
+ {
+ return ((h ^ (h << 5)) >> (int)(((3*8 - HLOG)) - h*5) & (HSIZE - 1));
+ }
+
+ /*
+ * compressed format
+ *
+ * 000LLLLL <L+1> ; literal
+ * LLLOOOOO oooooooo ; backref L
+ * 111OOOOO LLLLLLLL oooooooo ; backref L+7
+ *
+ */
+
+ public int lzf_compress (byte[] in_data, int in_len,byte[] out_data, int out_len)
+ {
+ int c;
+ long []htab=new long[1<<14];
+ for (c=0;c<1<<14;c++)
+ {
+ htab[c]=0;
+ }
+
+ long hslot;
+ UInt32 iidx = 0;
+ UInt32 oidx = 0;
+ //byte *in_end = ip + in_len;
+ //byte *out_end = op + out_len;
+ long reference;
+
+ UInt32 hval = FRST (in_data,iidx);
+ long off;
+ int lit = 0;
+
+ for (;;)
+ {
+ if (iidx < in_len - 2)
+ {
+ hval = NEXT (hval, in_data,iidx);
+ hslot = IDX (hval);
+ reference = htab[hslot];
+ htab[hslot] = (long)iidx;
+
+ if ((off = iidx - reference - 1) < MAX_OFF
+ && iidx + 4 < in_len
+ && reference > 0
+ && in_data[reference+0] == in_data[iidx+0]
+ && in_data[reference+1] == in_data[iidx+1]
+ && in_data[reference+2] == in_data[iidx+2]
+ )
+ {
+ /* match found at *reference++ */
+ UInt32 len = 2;
+ UInt32 maxlen = (UInt32)in_len - iidx - len;
+ maxlen = maxlen > MAX_REF ? MAX_REF : maxlen;
+
+ if (oidx + lit + 1 + 3 >= out_len)
+ return 0;
+
+ do
+ len++;
+ while (len < maxlen && in_data[reference+len] == in_data[iidx+len]);
+
+ if (lit!=0)
+ {
+ out_data[oidx++] = (byte)(lit - 1);
+ lit = -lit;
+ do
+ out_data[oidx++] = in_data[iidx+lit];
+ while ((++lit)!=0);
+ }
+
+ len -= 2;
+ iidx++;
+
+ if (len < 7)
+ {
+ out_data[oidx++] = (byte)((off >> 8) + (len << 5));
+ }
+ else
+ {
+ out_data[oidx++] = (byte)((off >> 8) + ( 7 << 5));
+ out_data[oidx++] = (byte)(len - 7);
+ }
+
+ out_data[oidx++] = (byte)off;
+
+ iidx += len-1;
+ hval = FRST (in_data,iidx);
+
+ hval = NEXT (hval,in_data, iidx);
+ htab[IDX (hval)] = iidx;
+ iidx++;
+
+ hval = NEXT (hval, in_data,iidx);
+ htab[IDX (hval)] = iidx;
+ iidx++;
+ continue;
+ }
+ }
+ else if (iidx == in_len)
+ break;
+
+ /* one more literal byte we must copy */
+ lit++;
+ iidx++;
+
+ if (lit == MAX_LIT)
+ {
+ if (oidx + 1 + MAX_LIT >= out_len)
+ return 0;
+
+ out_data[oidx++] = (byte)(MAX_LIT - 1);
+ lit = -lit;
+ do
+ out_data[oidx++] = in_data[iidx+lit];
+ while ((++lit)!=0);
+ }
+ }
+
+ if (lit!=0)
+ {
+ if (oidx + lit + 1 >= out_len)
+ return 0;
+
+ out_data[oidx++] = (byte)(lit - 1);
+ lit = -lit;
+ do
+ out_data[oidx++] = in_data[iidx+lit];
+ while ((++lit)!=0);
+ }
+
+ return (int)oidx;
+ }
+
+ /// <summary>
+ /// LZF Decompressor
+ /// </summary>
+ public int lzf_decompress ( byte[] in_data, int in_len, byte[] out_data, int out_len)
+ {
+ UInt32 iidx=0;
+ UInt32 oidx=0;
+
+ do
+ {
+ UInt32 ctrl = in_data[iidx++];
+
+ if (ctrl < (1 << 5)) /* literal run */
+ {
+ ctrl++;
+
+ if (oidx + ctrl > out_len)
+ {
+ //SET_ERRNO (E2BIG);
+ return 0;
+ }
+
+ do
+ out_data[oidx++] = in_data[iidx++];
+ while ((--ctrl)!=0);
+ }
+ else /* back reference */
+ {
+ UInt32 len = ctrl >> 5;
+
+ int reference = (int)(oidx - ((ctrl & 0x1f) << 8) - 1);
+
+ if (len == 7)
+ len += in_data[iidx++];
+
+ reference -= in_data[iidx++];
+
+ if (oidx + len + 2 > out_len)
+ {
+ //SET_ERRNO (E2BIG);
+ return 0;
+ }
+
+ if (reference < 0)
+ {
+ //SET_ERRNO (EINVAL);
+ return 0;
+ }
+
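+            // Copy strictly byte by byte: the back reference may overlap
+            // the bytes being produced (this is how LZF encodes runs), so
+            // a block copy would read output that has not been written yet.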
+ out_data[oidx++]=out_data[reference++];
+ out_data[oidx++]=out_data[reference++];
+
+ do
+ out_data[oidx++]=out_data[reference++];
+ while ((--len)!=0);
+ }
+ }
+ while (iidx < in_len);
+
+ return (int)oidx;
+ }
+
+ public CLZF()
+ {
+ //
+            // TODO: Add constructor logic here
+ //
+ }
+ }
+}
+
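Editor's sketch (not part of the patch): the compressed-format comment above
lzf_compress describes three control-byte shapes -- 000LLLLL for a literal
run of L+1 bytes, LLLooooo oooooooo for a short back reference, and
111ooooo LLLLLLLL oooooooo for an extended one.  The standalone C program
below classifies a control byte the same way the branches of lzf_decompress
do; classify() is a hypothetical name used only for illustration:

#include <stdio.h>

/* Decode the tag bits of one LZF control byte.
 *   ctrl < 32        -> literal run of ctrl+1 bytes
 *   tag = ctrl >> 5  -> back reference of length tag+2 ...
 *   tag == 7         -> ... plus a following length byte
 * The 13-bit offset is ((ctrl & 0x1f) << 8) | next_byte, plus 1.
 */
static void classify (unsigned char ctrl)
{
  if (ctrl < (1 << 5))
    printf ("literal run of %u byte(s)\n", ctrl + 1u);
  else if ((ctrl >> 5) != 7)
    printf ("short back reference, copy %u bytes\n", (ctrl >> 5) + 2u);
  else
    printf ("long back reference, copy 7+2 bytes plus the next byte\n");
}

int main (void)
{
  classify (0x04);   /* 000 00100: five literal bytes follow    */
  classify (0x45);   /* 010 00101: copy 4 bytes from a back ref */
  classify (0xe5);   /* 111 00101: extended-length back ref     */
  return 0;
}
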
diff --git a/lib/liblzf-3.5/cs/README b/lib/liblzf-3.5/cs/README
new file mode 100644
index 0000000000..9964aff103
--- /dev/null
+++ b/lib/liblzf-3.5/cs/README
@@ -0,0 +1,7 @@
+The C♯ implementation of the LZF en-/decoder functions in this
+directory was written (and is maintained) by
+
+ Oren J. Maurice <oymaurice@hazorea.org.il>.
+
+If you have any questions or improvements, you should contact the
+original author (and maybe CC me, Marc Lehmann <liblzf@schmorp.de>).
diff --git a/lib/liblzf-3.5/install-sh b/lib/liblzf-3.5/install-sh
new file mode 100755
index 0000000000..e9de23842d
--- /dev/null
+++ b/lib/liblzf-3.5/install-sh
@@ -0,0 +1,251 @@
+#!/bin/sh
+#
+# install - install a program, script, or datafile
+# This comes from X11R5 (mit/util/scripts/install.sh).
+#
+# Copyright 1991 by the Massachusetts Institute of Technology
+#
+# Permission to use, copy, modify, distribute, and sell this software and its
+# documentation for any purpose is hereby granted without fee, provided that
+# the above copyright notice appear in all copies and that both that
+# copyright notice and this permission notice appear in supporting
+# documentation, and that the name of M.I.T. not be used in advertising or
+# publicity pertaining to distribution of the software without specific,
+# written prior permission. M.I.T. makes no representations about the
+# suitability of this software for any purpose. It is provided "as is"
+# without express or implied warranty.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch. It can only install one file at a time, a restriction
+# shared with many OS's install programs.
+
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+
+
+# put in absolute paths if you don't have them in your path; or use env. vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+transformbasename=""
+transform_arg=""
+instcmd="$mvprog"
+chmodcmd="$chmodprog 0755"
+chowncmd=""
+chgrpcmd=""
+stripcmd=""
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=""
+dst=""
+dir_arg=""
+
+while [ x"$1" != x ]; do
+ case $1 in
+ -c) instcmd="$cpprog"
+ shift
+ continue;;
+
+ -d) dir_arg=true
+ shift
+ continue;;
+
+ -m) chmodcmd="$chmodprog $2"
+ shift
+ shift
+ continue;;
+
+ -o) chowncmd="$chownprog $2"
+ shift
+ shift
+ continue;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift
+ shift
+ continue;;
+
+ -s) stripcmd="$stripprog"
+ shift
+ continue;;
+
+ -t=*) transformarg=`echo $1 | sed 's/-t=//'`
+ shift
+ continue;;
+
+ -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
+ shift
+ continue;;
+
+ *) if [ x"$src" = x ]
+ then
+ src=$1
+ else
+ # this colon is to work around a 386BSD /bin/sh bug
+ :
+ dst=$1
+ fi
+ shift
+ continue;;
+ esac
+done
+
+if [ x"$src" = x ]
+then
+ echo "install: no input file specified"
+ exit 1
+else
+ true
+fi
+
+if [ x"$dir_arg" != x ]; then
+ dst=$src
+ src=""
+
+ if [ -d $dst ]; then
+ instcmd=:
+ chmodcmd=""
+ else
+ instcmd=mkdir
+ fi
+else
+
+# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
+# might cause directories to be created, which would be especially bad
+# if $src (and thus $dsttmp) contains '*'.
+
+ if [ -f $src -o -d $src ]
+ then
+ true
+ else
+ echo "install: $src does not exist"
+ exit 1
+ fi
+
+ if [ x"$dst" = x ]
+ then
+ echo "install: no destination specified"
+ exit 1
+ else
+ true
+ fi
+
+# If destination is a directory, append the input filename; if your system
+# does not like double slashes in filenames, you may need to add some logic
+
+ if [ -d $dst ]
+ then
+ dst="$dst"/`basename $src`
+ else
+ true
+ fi
+fi
+
+## this sed command emulates the dirname command
+dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
+
+# Make sure that the destination directory exists.
+# this part is taken from Noah Friedman's mkinstalldirs script
+
+# Skip lots of stat calls in the usual case.
+if [ ! -d "$dstdir" ]; then
+defaultIFS='
+'
+IFS="${IFS-${defaultIFS}}"
+
+oIFS="${IFS}"
+# Some sh's can't handle IFS=/ for some reason.
+IFS='%'
+set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
+IFS="${oIFS}"
+
+pathcomp=''
+
+while [ $# -ne 0 ] ; do
+ pathcomp="${pathcomp}${1}"
+ shift
+
+ if [ ! -d "${pathcomp}" ] ;
+ then
+ $mkdirprog "${pathcomp}"
+ else
+ true
+ fi
+
+ pathcomp="${pathcomp}/"
+done
+fi
+
+if [ x"$dir_arg" != x ]
+then
+ $doit $instcmd $dst &&
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
+else
+
+# If we're going to rename the final executable, determine the name now.
+
+ if [ x"$transformarg" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ dstfile=`basename $dst $transformbasename |
+ sed $transformarg`$transformbasename
+ fi
+
+# don't allow the sed command to completely eliminate the filename
+
+ if [ x"$dstfile" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ true
+ fi
+
+# Make a temp file name in the proper directory.
+
+ dsttmp=$dstdir/#inst.$$#
+
+# Move or copy the file name to the temp name
+
+ $doit $instcmd $src $dsttmp &&
+
+ trap "rm -f ${dsttmp}" 0 &&
+
+# and set any options; do chmod last to preserve setuid bits
+
+# If any of these fail, we abort the whole thing. If we want to
+# ignore errors from any of these, just make sure not to ignore
+# errors from the above "$doit $instcmd $src $dsttmp" command.
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
+
+# Now rename the file to the real destination.
+
+ $doit $rmcmd -f $dstdir/$dstfile &&
+ $doit $mvcmd $dsttmp $dstdir/$dstfile
+
+fi &&
+
+
+exit 0
diff --git a/lib/liblzf-3.5/lzf.c b/lib/liblzf-3.5/lzf.c
new file mode 100644
index 0000000000..bedfdb6fea
--- /dev/null
+++ b/lib/liblzf-3.5/lzf.c
@@ -0,0 +1,537 @@
+/*
+ * Copyright (c) 2006 Stefan Traby <stefan@hello-penguin.com>
+ *
+ * Redistribution and use in source and binary forms, with or without modifica-
+ * tion, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
+ * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
+ * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
+ * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License ("GPL") version 2 or any later version,
+ * in which case the provisions of the GPL are applicable instead of
+ * the above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the BSD license, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file under
+ * either the BSD or the GPL.
+ */
+
+#include "config.h"
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <limits.h>
+#include "lzf.h"
+
+#ifdef HAVE_GETOPT_H
+# include <getopt.h>
+#endif
+
+#define BLOCKSIZE (1024 * 64 - 1)
+#define MAX_BLOCKSIZE BLOCKSIZE
+
+typedef unsigned char u8;
+
+static off_t nr_read, nr_written;
+
+static const char *imagename;
+static enum { compress, uncompress, lzcat } mode = compress;
+static int verbose = 0;
+static int force = 0;
+static long blocksize = BLOCKSIZE;
+
+#ifdef HAVE_GETOPT_LONG
+
+ struct option longopts[] = {
+ {"compress", 0, 0, 'c'},
+ {"decompress", 0, 0, 'd'},
+ {"uncompress", 0, 0, 'd'},
+ {"force", 0, 0, 'f'},
+ {"help", 0, 0, 'h'},
+ {"verbose", 0, 0, 'v'},
+ {"blocksize", 1, 0, 'b'},
+ {0, 0, 0, 0}
+ };
+
+ static const char *opt =
+ "-c --compress compress\n"
+ "-d --decompress decompress\n"
+ "-f --force force overwrite of output file\n"
+    "-h --help give this help\n"
+    "-v --verbose verbose mode\n"
+    "-b # --blocksize # set blocksize\n"
+    "\n";
+
+#else
+
+ static const char *opt =
+ "-c compress\n"
+ "-d decompress\n"
+ "-f force overwrite of output file\n"
+ "-h give this help\n"
+ "-v verbose mode\n"
+ "-b # set blocksize\n"
+ "\n";
+
+#endif
+
+static void
+usage (int rc)
+{
+ fprintf (stderr, "\n"
+ "lzf, a very lightweight compression/decompression utility written by Stefan Traby.\n"
+           "uses liblzf written by Marc Lehmann <schmorp@schmorp.de>.  You can find more info at\n"
+ "http://liblzf.plan9.de/\n"
+ "\n"
+ "usage: lzf [-dufhvb] [file ...]\n"
+ " unlzf [file ...]\n"
+ " lzcat [file ...]\n"
+ "\n%s",
+ opt);
+
+ exit (rc);
+}
+
+static inline ssize_t
+rread (int fd, void *buf, size_t len)
+{
+ ssize_t rc = 0, offset = 0;
+ char *p = buf;
+
+ while (len && (rc = read (fd, &p[offset], len)) > 0)
+ {
+ offset += rc;
+ len -= rc;
+ }
+
+ nr_read += offset;
+
+ if (rc < 0)
+ return rc;
+
+ return offset;
+}
+
+/* returns 0 if all written else -1 */
+static inline ssize_t
+wwrite (int fd, void *buf, size_t len)
+{
+ ssize_t rc;
+ char *b = buf;
+ size_t l = len;
+
+ while (l)
+ {
+ rc = write (fd, b, l);
+ if (rc < 0)
+ {
+ fprintf (stderr, "%s: write error: ", imagename);
+ perror ("");
+ return -1;
+ }
+
+ l -= rc;
+ b += rc;
+ }
+
+ nr_written += len;
+ return 0;
+}
+
+/*
+ * Anatomy: an lzf file consists of any number of blocks in the following format:
+ *
+ * \x00 EOF (optional)
+ * "ZV\0" 2-byte-usize <uncompressed data>
+ * "ZV\1" 2-byte-csize 2-byte-usize <compressed data>
+ * "ZV\2" 4-byte-crc32-0xdebb20e3 (NYI)
+ */
+
+
+#define TYPE0_HDR_SIZE 5
+#define TYPE1_HDR_SIZE 7
+#define MAX_HDR_SIZE 7
+#define MIN_HDR_SIZE 5
+
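+/* Editor's note (illustrative, not upstream liblzf): a concrete instance of
+ * the framing above.  The 2-byte string "hi" stored as one uncompressed
+ * block, followed by the optional EOF marker, is the 8-byte stream
+ *
+ *   5A 56 00   00 02   68 69   00
+ *   'Z''V'\0   usize   data    EOF
+ *
+ * usize/csize are big-endian, matching the (header[3] << 8) | header[4]
+ * reads in uncompress_fd below.
+ */
+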
+static int
+compress_fd (int from, int to)
+{
+ ssize_t us, cs, len;
+ u8 buf1[MAX_BLOCKSIZE + MAX_HDR_SIZE + 16];
+ u8 buf2[MAX_BLOCKSIZE + MAX_HDR_SIZE + 16];
+ u8 *header;
+
+ nr_read = nr_written = 0;
+ while ((us = rread (from, &buf1[MAX_HDR_SIZE], blocksize)) > 0)
+ {
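+      /* Offer lzf_compress slightly less output room than the input
+       * (us - 4): if the block does not shrink by at least that margin,
+       * lzf_compress reports 0 and the data is stored uncompressed below.
+       */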
+ cs = lzf_compress (&buf1[MAX_HDR_SIZE], us, &buf2[MAX_HDR_SIZE], us > 4 ? us - 4 : us);
+ if (cs)
+ {
+ header = &buf2[MAX_HDR_SIZE - TYPE1_HDR_SIZE];
+ header[0] = 'Z';
+ header[1] = 'V';
+ header[2] = 1;
+ header[3] = cs >> 8;
+ header[4] = cs & 0xff;
+ header[5] = us >> 8;
+ header[6] = us & 0xff;
+ len = cs + TYPE1_HDR_SIZE;
+ }
+ else
+ { // write uncompressed
+ header = &buf1[MAX_HDR_SIZE - TYPE0_HDR_SIZE];
+ header[0] = 'Z';
+ header[1] = 'V';
+ header[2] = 0;
+ header[3] = us >> 8;
+ header[4] = us & 0xff;
+ len = us + TYPE0_HDR_SIZE;
+ }
+
+ if (wwrite (to, header, len) == -1)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+uncompress_fd (int from, int to)
+{
+ u8 header[MAX_HDR_SIZE];
+ u8 buf1[MAX_BLOCKSIZE + MAX_HDR_SIZE + 16];
+ u8 buf2[MAX_BLOCKSIZE + MAX_HDR_SIZE + 16];
+ u8 *p;
+ int l, rd;
+ ssize_t rc, cs, us, bytes, over = 0;
+
+ nr_read = nr_written = 0;
+ while (1)
+ {
+ rc = rread (from, header + over, MAX_HDR_SIZE - over);
+ if (rc < 0)
+ {
+ fprintf (stderr, "%s: read error: ", imagename);
+ perror ("");
+ return -1;
+ }
+
+ rc += over;
+ over = 0;
+ if (!rc || header[0] == 0)
+ return 0;
+
+ if (rc < MIN_HDR_SIZE || header[0] != 'Z' || header[1] != 'V')
+ {
+ fprintf (stderr, "%s: invalid data stream - magic not found or short header\n", imagename);
+ return -1;
+ }
+
+ switch (header[2])
+ {
+ case 0:
+ cs = -1;
+ us = (header[3] << 8) | header[4];
+ p = &header[TYPE0_HDR_SIZE];
+ break;
+ case 1:
+ if (rc < TYPE1_HDR_SIZE)
+ {
+ goto short_read;
+ }
+ cs = (header[3] << 8) | header[4];
+ us = (header[5] << 8) | header[6];
+ p = &header[TYPE1_HDR_SIZE];
+ break;
+ default:
+ fprintf (stderr, "%s: unknown blocktype\n", imagename);
+ return -1;
+ }
+
+ bytes = cs == -1 ? us : cs;
+ l = &header[rc] - p;
+
+ if (l > 0)
+ memcpy (buf1, p, l);
+
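+      /* The fixed-size header read may already have consumed bytes that
+       * belong to the next block; keep the surplus in `header' and account
+       * for it through `over' on the next pass of the loop.
+       */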
+ if (l > bytes)
+ {
+ over = l - bytes;
+ memmove (header, &p[bytes], over);
+ }
+
+ p = &buf1[l];
+ rd = bytes - l;
+ if (rd > 0)
+ if ((rc = rread (from, p, rd)) != rd)
+ goto short_read;
+
+ if (cs == -1)
+ {
+ if (wwrite (to, buf1, us))
+ return -1;
+ }
+ else
+ {
+ if (lzf_decompress (buf1, cs, buf2, us) != us)
+ {
+ fprintf (stderr, "%s: decompress: invalid stream - data corrupted\n", imagename);
+ return -1;
+ }
+
+ if (wwrite (to, buf2, us))
+ return -1;
+ }
+ }
+
+ return 0;
+
+short_read:
+ fprintf (stderr, "%s: short data\n", imagename);
+ return -1;
+}
+
+static int
+open_out (const char *name)
+{
+ int fd;
+ int m = O_EXCL;
+
+ if (force)
+ m = 0;
+
+  fd = open (name, O_CREAT | O_WRONLY | O_TRUNC | m, 0600); /* octal 0600, not decimal 600 */
+#if defined(__MINGW32__)
+ _setmode(fd, _O_BINARY);
+#endif
+ return fd;
+}
+
+static int
+compose_name (const char *fname, char *oname)
+{
+ char *p;
+
+ if (mode == compress)
+ {
+ if (strlen (fname) > PATH_MAX - 4)
+ {
+ fprintf (stderr, "%s: %s.lzf: name too long", imagename, fname);
+ return -1;
+ }
+
+ strcpy (oname, fname);
+ strcat (oname, ".lzf");
+ }
+ else
+ {
+ if (strlen (fname) > PATH_MAX)
+ {
+ fprintf (stderr, "%s: %s: name too long\n", imagename, fname);
+ return -1;
+ }
+
+ strcpy (oname, fname);
+ p = &oname[strlen (oname)] - 4;
+ if (p < oname || strcmp (p, ".lzf"))
+ {
+ fprintf (stderr, "%s: %s: unknown suffix\n", imagename, fname);
+ return -1;
+ }
+
+ *p = 0;
+ }
+
+ return 0;
+}
+
+static int
+run_file (const char *fname)
+{
+ int fd, fd2;
+ int rc;
+ struct stat mystat;
+ char oname[PATH_MAX + 1];
+
+ if (mode != lzcat)
+ if (compose_name (fname, oname))
+ return -1;
+
+#if !defined(__MINGW32__)
+ rc = lstat (fname, &mystat);
+#else
+ rc = stat (fname, &mystat);
+#endif
+ fd = open (fname, O_RDONLY);
+#if defined(__MINGW32__)
+ _setmode(fd, _O_BINARY);
+#endif
+ if (rc || fd == -1)
+ {
+ fprintf (stderr, "%s: %s: ", imagename, fname);
+ perror ("");
+ return -1;
+ }
+
+ if (!S_ISREG (mystat.st_mode))
+ {
+ fprintf (stderr, "%s: %s: not a regular file.\n", imagename, fname);
+ close (fd);
+ return -1;
+ }
+
+ if (mode == lzcat)
+ {
+ rc = uncompress_fd (fd, 1);
+ close (fd);
+ return rc;
+ }
+
+ fd2 = open_out (oname);
+ if (fd2 == -1)
+ {
+ fprintf (stderr, "%s: %s: ", imagename, oname);
+ perror ("");
+ close (fd);
+ return -1;
+ }
+
+ if (mode == compress)
+ {
+ rc = compress_fd (fd, fd2);
+ if (!rc && verbose)
+ fprintf (stderr, "%s: %5.1f%% -- replaced with %s\n",
+ fname, nr_read == 0 ? 0 : 100.0 - nr_written / ((double) nr_read / 100.0), oname);
+ }
+ else
+ {
+ rc = uncompress_fd (fd, fd2);
+ if (!rc && verbose)
+ fprintf (stderr, "%s: %5.1f%% -- replaced with %s\n",
+ fname, nr_written == 0 ? 0 : 100.0 - nr_read / ((double) nr_written / 100.0), oname);
+ }
+
+#if !defined(__MINGW32__)
+ fchmod (fd2, mystat.st_mode);
+#else
+ chmod (oname, mystat.st_mode);
+#endif
+ close (fd);
+ close (fd2);
+
+ if (!rc)
+ unlink (fname);
+
+ return rc;
+}
+
+int
+main (int argc, char *argv[])
+{
+ char *p = argv[0];
+ int optc;
+ int rc = 0;
+
+ errno = 0;
+ p = getenv ("LZF_BLOCKSIZE");
+ if (p)
+ {
+ blocksize = strtoul (p, 0, 0);
+ if (errno || !blocksize || blocksize > MAX_BLOCKSIZE)
+ blocksize = BLOCKSIZE;
+ }
+
+ p = strrchr (argv[0], '/');
+ imagename = p ? ++p : argv[0];
+
+ if (!strncmp (imagename, "un", 2) || !strncmp (imagename, "de", 2))
+ mode = uncompress;
+
+ if (strstr (imagename, "cat"))
+ mode = lzcat;
+
+#ifdef HAVE_GETOPT_LONG
+ while ((optc = getopt_long (argc, argv, "cdfhvb:", longopts, 0)) != -1)
+#else
+ while ((optc = getopt (argc, argv, "cdfhvb:")) != -1)
+#endif
+ {
+ switch (optc)
+ {
+ case 'c':
+ mode = compress;
+ break;
+ case 'd':
+ mode = uncompress;
+ break;
+ case 'f':
+ force = 1;
+ break;
+ case 'h':
+ usage (0);
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ case 'b':
+ errno = 0;
+ blocksize = strtoul (optarg, 0, 0);
+ if (errno || !blocksize || blocksize > MAX_BLOCKSIZE)
+ blocksize = BLOCKSIZE;
+ break;
+ default:
+ usage (1);
+ break;
+ }
+ }
+
+ if (optind == argc)
+ { /* no file arguments: filter stdin to stdout */
+ if (!force)
+ {
+ if ((mode == uncompress || mode == lzcat) && isatty (0))
+ {
+ fprintf (stderr, "%s: compressed data not read from a terminal. Use -f to force decompression.\n", imagename);
+ exit (1);
+ }
+ if (mode == compress && isatty (1))
+ {
+ fprintf (stderr, "%s: compressed data not written to a terminal. Use -f to force compression.\n", imagename);
+ exit (1);
+ }
+ }
+
+ if (mode == compress)
+ rc = compress_fd (0, 1);
+ else
+ rc = uncompress_fd (0, 1);
+
+ exit (rc ? 1 : 0);
+ }
+
+ while (optind < argc)
+ rc |= run_file (argv[optind++]);
+
+ exit (rc ? 1 : 0);
+}
+
diff --git a/lib/liblzf-3.5/lzf.h b/lib/liblzf-3.5/lzf.h
new file mode 100644
index 0000000000..919b6e6be2
--- /dev/null
+++ b/lib/liblzf-3.5/lzf.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2000-2008 Marc Alexander Lehmann <schmorp@schmorp.de>
+ *
+ * Redistribution and use in source and binary forms, with or without modifica-
+ * tion, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
+ * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
+ * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
+ * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License ("GPL") version 2 or any later version,
+ * in which case the provisions of the GPL are applicable instead of
+ * the above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the BSD license, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file under
+ * either the BSD or the GPL.
+ */
+
+#ifndef LZF_H
+#define LZF_H
+
+/***********************************************************************
+**
+** lzf -- an extremely fast/free compression/decompression-method
+** http://liblzf.plan9.de/
+**
+** This algorithm is believed to be patent-free.
+**
+***********************************************************************/
+
+#define LZF_VERSION 0x0105 /* 1.5, API version */
+
+/*
+ * Compress in_len bytes stored at the memory block starting at
+ * in_data and write the result to out_data, up to a maximum length
+ * of out_len bytes.
+ *
+ * If the output buffer is not large enough or any error occurs, return 0;
+ * otherwise, return the number of bytes used. This might be considerably
+ * more than in_len (but will be less than 104% of the original size), so it
+ * makes sense to always use out_len == in_len - 1 to ensure _some_
+ * compression, and to store the data uncompressed otherwise (with a flag,
+ * of course).
+ *
+ * lzf_compress might use different algorithms on different systems and
+ * even on different runs, and thus might produce different compressed strings
+ * depending on the phase of the moon or similar factors. However, all
+ * these strings are architecture-independent and will result in the
+ * original data when decompressed using lzf_decompress.
+ *
+ * The buffers must not be overlapping.
+ *
+ * If the option LZF_STATE_ARG is enabled, an extra argument must be
+ * supplied which is not reflected in this header file. Refer to lzfP.h
+ * and lzf_c.c.
+ *
+ */
+unsigned int
+lzf_compress (const void *const in_data, unsigned int in_len,
+ void *out_data, unsigned int out_len);
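+
+/*
+ * Editor's illustration (not part of upstream liblzf): a minimal sketch
+ * of the fallback convention described above. The one-byte flag and the
+ * names are hypothetical; assumes in_len > 0 and <string.h> for memcpy.
+ *
+ *   // out must have room for in_len + 1 bytes; returns total bytes used
+ *   unsigned int
+ *   pack (const unsigned char *in, unsigned int in_len, unsigned char *out)
+ *   {
+ *     unsigned int clen = lzf_compress (in, in_len, out + 1, in_len - 1);
+ *
+ *     if (clen)
+ *       out[0] = 1;                 // payload is lzf-compressed
+ *     else
+ *       {
+ *         out[0] = 0;               // did not shrink: stored verbatim
+ *         memcpy (out + 1, in, in_len);
+ *         clen = in_len;
+ *       }
+ *
+ *     return clen + 1;
+ *   }
+ */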
+
+/*
+ * Decompress data compressed with some version of the lzf_compress
+ * function and stored at location in_data and length in_len. The result
+ * will be stored at out_data up to a maximum of out_len characters.
+ *
+ * If the output buffer is not large enough to hold the decompressed
+ * data, a 0 is returned and errno is set to E2BIG. Otherwise the number
+ * of decompressed bytes (i.e. the original length of the data) is
+ * returned.
+ *
+ * If an error in the compressed data is detected, a zero is returned and
+ * errno is set to EINVAL.
+ *
+ * This function is very fast, about as fast as a copying loop.
+ */
+unsigned int
+lzf_decompress (const void *const in_data, unsigned int in_len,
+ void *out_data, unsigned int out_len);
+
+#endif
+
diff --git a/lib/liblzf-3.5/lzfP.h b/lib/liblzf-3.5/lzfP.h
new file mode 100644
index 0000000000..d533f18292
--- /dev/null
+++ b/lib/liblzf-3.5/lzfP.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2000-2007 Marc Alexander Lehmann <schmorp@schmorp.de>
+ *
+ * Redistribution and use in source and binary forms, with or without modifica-
+ * tion, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
+ * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
+ * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
+ * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License ("GPL") version 2 or any later version,
+ * in which case the provisions of the GPL are applicable instead of
+ * the above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the BSD license, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file under
+ * either the BSD or the GPL.
+ */
+
+#ifndef LZFP_h
+#define LZFP_h
+
+#define STANDALONE 1 /* at the moment, this is ok. */
+
+#ifndef STANDALONE
+# include "lzf.h"
+#endif
+
+/*
+ * The size of the hash table is (1 << HLOG) * sizeof (char *).
+ * Decompression is independent of the hash table size.
+ * The difference between 15 and 14 is very small
+ * for small blocks (and 14 is usually a bit faster).
+ * For a low-memory/faster configuration, use HLOG == 13;
+ * For best compression, use 15 or 16 (or more, up to 23).
+ */
+#ifndef HLOG
+# define HLOG 16
+#endif
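+
+/*
+ * Editor's note: the table holds (1 << HLOG) pointers, so the default
+ * HLOG == 16 costs 65536 * sizeof (char *) bytes: 512 KiB on a 64-bit
+ * system, 256 KiB on a 32-bit one. HLOG == 13 shrinks that to
+ * 64 KiB / 32 KiB at some cost in compression ratio.
+ */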
+
+/*
+ * Sacrifice very little compression quality in favour of compression speed.
+ * This gives almost the same compression as the default code, and is
+ * (very roughly) 15% faster. This is the preferred mode of operation.
+ */
+#ifndef VERY_FAST
+# define VERY_FAST 1
+#endif
+
+/*
+ * Sacrifice some more compression quality in favour of compression speed.
+ * (roughly 1-2% worse compression for large blocks and
+ * 9-10% for small, redundant, blocks and >>20% better speed in both cases)
+ * In short: when in need of speed, enable this for binary data,
+ * possibly disable this for text data.
+ */
+#ifndef ULTRA_FAST
+# define ULTRA_FAST 0
+#endif
+
+/*
+ * Unconditionally aligning does not cost very much, so do it if unsure
+ */
+#ifndef STRICT_ALIGN
+# define STRICT_ALIGN !(defined(__i386) || defined (__amd64))
+#endif
+
+/*
+ * You may choose to pre-set the hash table (might be faster on some
+ * modern cpus and large (>>64k) blocks, and also makes compression
+ * deterministic/repeatable when the configuration otherwise is the same).
+ */
+#ifndef INIT_HTAB
+# define INIT_HTAB 0
+#endif
+
+/*
+ * Avoid assigning values to the errno variable? For some embedding
+ * purposes (the Linux kernel, for example), this is necessary. NOTE: this breaks
+ * the documentation in lzf.h.
+ */
+#ifndef AVOID_ERRNO
+# define AVOID_ERRNO 0
+#endif
+
+/*
+ * Whether to pass the LZF_STATE variable as argument, or allocate it
+ * on the stack. For small-stack environments, define this to 1.
+ * NOTE: this breaks the prototype in lzf.h.
+ */
+#ifndef LZF_STATE_ARG
+# define LZF_STATE_ARG 0
+#endif
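+
+/*
+ * Editor's illustration: with LZF_STATE_ARG defined as 1, the caller
+ * owns the hash table and lzf_compress gains a trailing parameter
+ * (see lzf_c.c):
+ *
+ *   static LZF_STATE state;        // 'state' is a hypothetical name
+ *   unsigned int n = lzf_compress (in, in_len, out, out_len, state);
+ */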
+
+/*
+ * Whether to add extra checks for input validity in lzf_decompress
+ * and return EINVAL if the input stream has been corrupted. This
+ * only shields against overflowing the input buffer and will not
+ * detect most corrupted streams.
+ * This check is not normally noticeable on modern hardware
+ * (<1% slowdown), but might slow down older cpus considerably.
+ */
+#ifndef CHECK_INPUT
+# define CHECK_INPUT 1
+#endif
+
+/*****************************************************************************/
+/* nothing should be changed below */
+
+typedef unsigned char u8;
+
+typedef const u8 *LZF_STATE[1 << (HLOG)];
+
+#if !STRICT_ALIGN
+/* for unaligned accesses we need a 16 bit datatype. */
+# include <limits.h>
+# if USHRT_MAX == 65535
+ typedef unsigned short u16;
+# elif UINT_MAX == 65535
+ typedef unsigned int u16;
+# else
+# undef STRICT_ALIGN
+# define STRICT_ALIGN 1
+# endif
+#endif
+
+#if ULTRA_FAST
+# if defined(VERY_FAST)
+# undef VERY_FAST
+# endif
+#endif
+
+#if INIT_HTAB
+# ifdef __cplusplus
+# include <cstring>
+# else
+# include <string.h>
+# endif
+#endif
+
+#endif
+
diff --git a/lib/liblzf-3.5/lzf_c.c b/lib/liblzf-3.5/lzf_c.c
new file mode 100644
index 0000000000..0b5d816dd0
--- /dev/null
+++ b/lib/liblzf-3.5/lzf_c.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2000-2008 Marc Alexander Lehmann <schmorp@schmorp.de>
+ *
+ * Redistribution and use in source and binary forms, with or without modifica-
+ * tion, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
+ * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
+ * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
+ * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License ("GPL") version 2 or any later version,
+ * in which case the provisions of the GPL are applicable instead of
+ * the above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the BSD license, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file under
+ * either the BSD or the GPL.
+ */
+
+#include "lzfP.h"
+
+#define HSIZE (1 << (HLOG))
+
+/*
+ * Don't play with this unless you benchmark!
+ * Decompression does not depend on the hash function.
+ * The hashing function might seem strange; just believe me,
+ * it works ;)
+ */
+#ifndef FRST
+# define FRST(p) (((p[0]) << 8) | p[1])
+# define NEXT(v,p) (((v) << 8) | p[2])
+# if ULTRA_FAST
+# define IDX(h) ((( h >> (3*8 - HLOG)) - h ) & (HSIZE - 1))
+# elif VERY_FAST
+# define IDX(h) ((( h >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
+# else
+# define IDX(h) ((((h ^ (h << 5)) >> (3*8 - HLOG)) - h*5) & (HSIZE - 1))
+# endif
+#endif
+/*
+ * IDX works because it is very similar to a multiplicative hash, e.g.
+ * ((h * 57321 >> (3*8 - HLOG)) & (HSIZE - 1))
+ * the latter is also quite fast on newer CPUs, and compresses similarly.
+ *
+ * the next one is also quite good, albeit slow ;)
+ * (int)(cos(h & 0xffffff) * 1e6)
+ */
+
+#if 0
+/* original lzv-like hash function, much worse and thus slower */
+# define FRST(p) (p[0] << 5) ^ p[1]
+# define NEXT(v,p) ((v) << 5) ^ p[2]
+# define IDX(h) ((h) & (HSIZE - 1))
+#endif
+
+#define MAX_LIT (1 << 5)
+#define MAX_OFF (1 << 13)
+#define MAX_REF ((1 << 8) + (1 << 3))
+
+#if __GNUC__ >= 3
+# define expect(expr,value) __builtin_expect ((expr),(value))
+# define inline inline
+#else
+# define expect(expr,value) (expr)
+# define inline static
+#endif
+
+#define expect_false(expr) expect ((expr) != 0, 0)
+#define expect_true(expr) expect ((expr) != 0, 1)
+
+/*
+ * compressed format
+ *
+ * 000LLLLL <L+1> ; literal
+ * LLLooooo oooooooo ; backref L
+ * 111ooooo LLLLLLLL oooooooo ; backref L+7
+ *
+ */
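+
+/*
+ * Worked example (editor's addition), decoding the three token kinds
+ * by hand with the lzf_d.c rules:
+ *
+ *   02 61 62 63    000 00010: L = 2, so the next L+1 = 3 bytes
+ *                  ("abc") are literals
+ *   20 02          001 00000, 00000010: copy L+2 = 3 bytes starting
+ *                  offset (0 << 8 | 2) + 1 = 3 back in the output
+ *   e0 01 04       111 00000: L = 7 + 01 = 8, copy L+2 = 10 bytes
+ *                  starting offset (0 << 8 | 4) + 1 = 5 back
+ */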
+
+unsigned int
+lzf_compress (const void *const in_data, unsigned int in_len,
+ void *out_data, unsigned int out_len
+#if LZF_STATE_ARG
+ , LZF_STATE htab
+#endif
+ )
+{
+#if !LZF_STATE_ARG
+ LZF_STATE htab;
+#endif
+ const u8 **hslot;
+ const u8 *ip = (const u8 *)in_data;
+ u8 *op = (u8 *)out_data;
+ const u8 *in_end = ip + in_len;
+ u8 *out_end = op + out_len;
+ const u8 *ref;
+
+ /* off requires a type wide enough to hold a general pointer difference.
+ * ISO C doesn't have that (size_t might not be enough and ptrdiff_t only
+ * works for differences within a single object). We also assume that
+ * no bit pattern traps. Since the only platform that is both non-POSIX
+ * and fails to support both assumptions is 64-bit Windows, we make a
+ * special workaround for it.
+ */
+#if defined (WIN32) && defined (_M_X64)
+ unsigned _int64 off; /* workaround for missing POSIX compliance */
+#else
+ unsigned long off;
+#endif
+ unsigned int hval;
+ int lit;
+
+ if (!in_len || !out_len)
+ return 0;
+
+#if INIT_HTAB
+ memset (htab, 0, sizeof (htab));
+# if 0
+ for (hslot = htab; hslot < htab + HSIZE; hslot++)
+ *hslot = ip; /* the for loop already advances hslot */
+# endif
+#endif
+
+ lit = 0; op++; /* start run */
+
+ hval = FRST (ip);
+ while (ip < in_end - 2)
+ {
+ hval = NEXT (hval, ip);
+ hslot = htab + IDX (hval);
+ ref = *hslot; *hslot = ip;
+
+ if (1
+#if INIT_HTAB
+ && ref < ip /* the next test will actually take care of this, but this is faster */
+#endif
+ && (off = ip - ref - 1) < MAX_OFF
+ && ip + 4 < in_end
+ && ref > (u8 *)in_data
+#if STRICT_ALIGN
+ && ref[0] == ip[0]
+ && ref[1] == ip[1]
+ && ref[2] == ip[2]
+#else
+ && *(u16 *)ref == *(u16 *)ip
+ && ref[2] == ip[2]
+#endif
+ )
+ {
+ /* match found at *ref++ */
+ unsigned int len = 2;
+ unsigned int maxlen = in_end - ip - len;
+ maxlen = maxlen > MAX_REF ? MAX_REF : maxlen;
+
+ if (expect_false (op + 3 + 1 >= out_end)) /* first a faster conservative test */
+ if (op - !lit + 3 + 1 >= out_end) /* second the exact but rare test */
+ return 0;
+
+ op [- lit - 1] = lit - 1; /* stop run */
+ op -= !lit; /* undo run if length is zero */
+
+ for (;;)
+ {
+ if (expect_true (maxlen > 16))
+ {
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+ len++; if (ref [len] != ip [len]) break;
+ }
+
+ do
+ len++;
+ while (len < maxlen && ref[len] == ip[len]);
+
+ break;
+ }
+
+ len -= 2; /* len is now #octets - 1 */
+ ip++;
+
+ if (len < 7)
+ {
+ *op++ = (off >> 8) + (len << 5);
+ }
+ else
+ {
+ *op++ = (off >> 8) + ( 7 << 5);
+ *op++ = len - 7;
+ }
+
+ *op++ = off;
+ lit = 0; op++; /* start run */
+
+ ip += len + 1;
+
+ if (expect_false (ip >= in_end - 2))
+ break;
+
+#if ULTRA_FAST || VERY_FAST
+ --ip;
+# if VERY_FAST && !ULTRA_FAST
+ --ip;
+# endif
+ hval = FRST (ip);
+
+ hval = NEXT (hval, ip);
+ htab[IDX (hval)] = ip;
+ ip++;
+
+# if VERY_FAST && !ULTRA_FAST
+ hval = NEXT (hval, ip);
+ htab[IDX (hval)] = ip;
+ ip++;
+# endif
+#else
+ ip -= len + 1;
+
+ do
+ {
+ hval = NEXT (hval, ip);
+ htab[IDX (hval)] = ip;
+ ip++;
+ }
+ while (len--);
+#endif
+ }
+ else
+ {
+ /* one more literal byte we must copy */
+ if (expect_false (op >= out_end))
+ return 0;
+
+ lit++; *op++ = *ip++;
+
+ if (expect_false (lit == MAX_LIT))
+ {
+ op [- lit - 1] = lit - 1; /* stop run */
+ lit = 0; op++; /* start run */
+ }
+ }
+ }
+
+ if (op + 3 > out_end) /* at most 3 bytes can be missing here */
+ return 0;
+
+ while (ip < in_end)
+ {
+ lit++; *op++ = *ip++;
+
+ if (expect_false (lit == MAX_LIT))
+ {
+ op [- lit - 1] = lit - 1; /* stop run */
+ lit = 0; op++; /* start run */
+ }
+ }
+
+ op [- lit - 1] = lit - 1; /* end run */
+ op -= !lit; /* undo run if length is zero */
+
+ return op - (u8 *)out_data;
+}
+
diff --git a/lib/liblzf-3.5/lzf_d.c b/lib/liblzf-3.5/lzf_d.c
new file mode 100644
index 0000000000..9e2cd829c3
--- /dev/null
+++ b/lib/liblzf-3.5/lzf_d.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2000-2007 Marc Alexander Lehmann <schmorp@schmorp.de>
+ *
+ * Redistribution and use in source and binary forms, with or without modifica-
+ * tion, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
+ * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
+ * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
+ * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License ("GPL") version 2 or any later version,
+ * in which case the provisions of the GPL are applicable instead of
+ * the above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the BSD license, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file under
+ * either the BSD or the GPL.
+ */
+
+#include "lzfP.h"
+
+#if AVOID_ERRNO
+# define SET_ERRNO(n)
+#else
+# include <errno.h>
+# define SET_ERRNO(n) errno = (n)
+#endif
+
+#if (__i386 || __amd64) && __GNUC__ >= 3
+# define lzf_movsb(dst, src, len) \
+ asm ("rep movsb" \
+ : "=D" (dst), "=S" (src), "=c" (len) \
+ : "0" (dst), "1" (src), "2" (len));
+#endif
+
+unsigned int
+lzf_decompress (const void *const in_data, unsigned int in_len,
+ void *out_data, unsigned int out_len)
+{
+ u8 const *ip = (const u8 *)in_data;
+ u8 *op = (u8 *)out_data;
+ u8 const *const in_end = ip + in_len;
+ u8 *const out_end = op + out_len;
+
+ do
+ {
+ unsigned int ctrl = *ip++;
+
+ if (ctrl < (1 << 5)) /* literal run */
+ {
+ ctrl++;
+
+ if (op + ctrl > out_end)
+ {
+ SET_ERRNO (E2BIG);
+ return 0;
+ }
+
+#if CHECK_INPUT
+ if (ip + ctrl > in_end)
+ {
+ SET_ERRNO (EINVAL);
+ return 0;
+ }
+#endif
+
+#ifdef lzf_movsb
+ lzf_movsb (op, ip, ctrl);
+#else
+ do
+ *op++ = *ip++;
+ while (--ctrl);
+#endif
+ }
+ else /* back reference */
+ {
+ unsigned int len = ctrl >> 5;
+
+ u8 *ref = op - ((ctrl & 0x1f) << 8) - 1;
+
+#if CHECK_INPUT
+ if (ip >= in_end)
+ {
+ SET_ERRNO (EINVAL);
+ return 0;
+ }
+#endif
+ if (len == 7)
+ {
+ len += *ip++;
+#if CHECK_INPUT
+ if (ip >= in_end)
+ {
+ SET_ERRNO (EINVAL);
+ return 0;
+ }
+#endif
+ }
+
+ ref -= *ip++;
+
+ if (op + len + 2 > out_end)
+ {
+ SET_ERRNO (E2BIG);
+ return 0;
+ }
+
+ if (ref < (u8 *)out_data)
+ {
+ SET_ERRNO (EINVAL);
+ return 0;
+ }
+
+#ifdef lzf_movsb
+ len += 2;
+ lzf_movsb (op, ref, len);
+#else
+ *op++ = *ref++; /* byte-wise copy: the reference may overlap the output */
+ *op++ = *ref++; /* being written, which replicates recent data RLE-style */
+
+ do
+ *op++ = *ref++;
+ while (--len);
+#endif
+ }
+ }
+ while (ip < in_end);
+
+ return op - (u8 *)out_data;
+}
+
diff --git a/lib/mesos.jar b/lib/mesos.jar
new file mode 100644
index 0000000000..60d299c8af
--- /dev/null
+++ b/lib/mesos.jar
Binary files differ
diff --git a/lib/scalacheck_2.8.0-1.7.jar b/lib/scalacheck_2.8.0-1.7.jar
new file mode 100644
index 0000000000..fb3c0e9e12
--- /dev/null
+++ b/lib/scalacheck_2.8.0-1.7.jar
Binary files differ
diff --git a/lib/scalatest-1.2/LICENSE b/lib/scalatest-1.2/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/lib/scalatest-1.2/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/lib/scalatest-1.2/NOTICE b/lib/scalatest-1.2/NOTICE
new file mode 100644
index 0000000000..a405cbd58a
--- /dev/null
+++ b/lib/scalatest-1.2/NOTICE
@@ -0,0 +1,7 @@
+================================================================================
+== NOTICE file corresponding to section 4(d) of the Apache License, ==
+== Version 2.0, in this case for the ScalaTest distribution. ==
+================================================================================
+
+ - This product includes software developed by
+ Artima, Inc. (http://www.artima.com/).
diff --git a/lib/scalatest-1.2/README.txt b/lib/scalatest-1.2/README.txt
new file mode 100644
index 0000000000..d505b9c640
--- /dev/null
+++ b/lib/scalatest-1.2/README.txt
@@ -0,0 +1,58 @@
+ScalaTest 1.0
+
+ScalaTest is a free, open-source testing toolkit for Scala and
+Java programmers. Because different developers take different approaches to creating
+software, no single approach to testing is a good fit for everyone. In light of
+this reality, ScalaTest is designed to facilitate different styles of testing. ScalaTest
+provides several traits that you can mix together into whatever combination makes you feel the most productive.
+For some examples of the various styles that ScalaTest supports, see:
+
+http://www.artima.com/scalatest
+
+GETTING STARTED
+
+To learn how to use ScalaTest, please open the Scaladoc
+documentation in the /scalatest-1.0/doc directory in your browser.
+Look first at the documentation for trait
+org.scalatest.Suite, which gives a decent intro. All the other types are
+documented as well, so you can hop around to learn more.
+org.scalatest.tools.Runner explains how to use the application. The
+Ignore class is written in Java, and isn't currently shown in the Scaladoc.
+
+To try it out, you can use ScalaTest to run its own tests, i.e., the tests
+used to test ScalaTest itself. This command will run the GUI:
+
+scala -classpath scalatest-1.0.jar org.scalatest.tools.Runner -p "scalatest-1.0-tests.jar" -g -s org.scalatest.SuiteSuite
+
+This command will run the tests and just print the results to standard output:
+
+scala -classpath scalatest-1.0.jar org.scalatest.tools.Runner -p "scalatest-1.0-tests.jar" -o -s org.scalatest.SuiteSuite
+
+ScalaTest 1.0 was tested with Scala version 2.7.5.final, so it is not
+guaranteed to work with earlier Scala versions.
+
+ABOUT SCALATEST
+
+ScalaTest was written by Bill Venners, George Berger, Josh Cough, and
+other contributors starting in late 2007. ScalaTest, which is almost
+exclusively written in Scala, follows and improves upon the Java code
+and design of Artima SuiteRunner, a testing tool also written
+primarily by Bill Venners, starting in 2001. Over the years a few
+other people contributed to SuiteRunner as well, including:
+
+Mark Brouwer
+Chua Chee Seng
+Chris Daily
+Matt Gerrans
+John Mitchel
+Frank Sommers
+
+Several people have helped with ScalaTest, including:
+
+Corey Haines
+Colin Howe
+Dianne Marsh
+Joel Neely
+Jon-Anders Teigen
+Daniel Watson
+
diff --git a/lib/scalatest-1.2/scalatest-1.2.jar b/lib/scalatest-1.2/scalatest-1.2.jar
new file mode 100644
index 0000000000..cb8db9bdf5
--- /dev/null
+++ b/lib/scalatest-1.2/scalatest-1.2.jar
Binary files differ
diff --git a/lib/slf4j-1.6.1/slf4j-api-1.6.1.jar b/lib/slf4j-1.6.1/slf4j-api-1.6.1.jar
new file mode 100644
index 0000000000..42e0ad0de7
--- /dev/null
+++ b/lib/slf4j-1.6.1/slf4j-api-1.6.1.jar
Binary files differ
diff --git a/lib/slf4j-1.6.1/slf4j-log4j12-1.6.1.jar b/lib/slf4j-1.6.1/slf4j-log4j12-1.6.1.jar
new file mode 100644
index 0000000000..873d11983e
--- /dev/null
+++ b/lib/slf4j-1.6.1/slf4j-log4j12-1.6.1.jar
Binary files differ